def add_tools_channel(server_id, action_id, dry_run=0):
    """ShadowAction: subscribe the guest system to the RHN tools channel.

    Always raises ShadowAction (success marker); the subscription itself
    is skipped when dry_run is set.
    """
    log_debug(3)
    if dry_run:
        log_debug(4, "dry run requested")
    else:
        subscribe_to_tools_channel(server_id)
    raise ShadowAction("Subscribed guest to tools channel.")
def __init__(self, list_ifaces=None):
    """Build the address map from a list of per-address dicts.

    Each entry of list_ifaces must be a dict; fields are looked up via
    self.key_mapping and the normalized values are stored in self.ifaces
    keyed by the (cleansed) address.  Raises rhnFault(53) on malformed
    input.
    """
    log_debug(4, list_ifaces)
    self.ifaces = {}
    self.db_ifaces = []
    # parameters which are not allowed to be empty and set to NULL
    self._autonull = ('address', 'netmask')
    self.sequence = "rhn_srv_net_iface_id_seq"
    if not list_ifaces:
        return
    for info in list_ifaces:
        if not isinstance(info, type({})):
            raise rhnFault(53, "Unexpected format for interface %s" % info)
        vdict = {}
        for key, mapping in self.key_mapping.items():
            # Look at the mapping first; if not found, look for the key
            if info.has_key(mapping):
                k = mapping
            else:
                k = key
            if not info.has_key(k):
                raise rhnFault(53, "Unable to find required field %s" % (key))
            val = info[k]
            if mapping in ['ip_addr', 'netmask', 'broadcast', 'address']:
                # bugzilla: 129840 kudzu (rhpl) will sometimes pad octets
                # with leading zeros, causing confusion; clean those up
                val = self.cleanse_ip_addr(val)
            vdict[mapping] = val
        self.ifaces[vdict['address']] = vdict
def auth_proxy():
    """ Authenticates a proxy carrying a clients request. For a valid or
        unsigned request, this function returns 1 (OK), otherwise it raises
        rhnFault

        NOTE: X-RHN-Proxy-Auth described in proxy/broker/rhnProxyAuth.py
    """
    log_debug(3)
    headers = rhnFlags.get('outputTransportOptions')
    if not rhnFlags.test('X-RHN-Proxy-Auth'):
        # No auth information; decline any action
        log_debug(4, "declined proxy authentication")
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (
            1003, _("declined proxy authentication"))
        raise rhnFault(1003)  # Invalid session key
    # NOTE:
    #  - < v3.1 RHN proxies send only 1 token in this header
    #  - > v3.1: we send the route of the requests via multiple tokens
    #    "token1:hostname1,token2:hostname2" the first tuple is the first
    #    proxy hit.
    tokens = string.split(rhnFlags.get('X-RHN-Proxy-Auth'), ',')
    # drop empty entries produced by stray commas
    tokens = filter(lambda token: token, tokens)
    # _verifyProxyAuthToken raises rhnFault on any invalid token
    for auth_token in tokens:
        _verifyProxyAuthToken(auth_token)
    # if no rhnFault was raised then the tokens all passed
    return 1
def schedule_virt_guest_pkg_install(server_id, action_id, dry_run=0):
    """ ShadowAction that schedules a package installation action for the
        rhn-virtualization-guest package.

        Raises InvalidAction if the system is not subscribed to the RHN
        Tools channel or the package cannot be found there.
    """
    log_debug(3)
    virt_host_package_name = "rhn-virtualization-guest"
    tools_channel = SubscribedChannel(server_id, "rhn-tools")
    found_tools_channel = tools_channel.is_subscribed_to_channel()
    if not found_tools_channel:
        raise InvalidAction("System not subscribed to the RHN Tools channel.")
    rhn_v12n_package = ChannelPackage(server_id, virt_host_package_name)
    if not rhn_v12n_package.exists():
        raise InvalidAction("Could not find the rhn-virtualization-guest package.")
    try:
        install_scheduler = PackageInstallScheduler(server_id, action_id, rhn_v12n_package)
        if (not dry_run):
            install_scheduler.schedule_package_install()
        else:
            log_debug(4, "dry run requested")
    except NoActionInfo, nai:
        # re-raise as InvalidAction, preserving the original traceback
        raise InvalidAction(str(nai)), None, sys.exc_info()[2]
def __init__(self, dict=None):
    """Build the interface map from a hardware-profile dict.

    dict maps interface names to per-interface info dicts; the 'class'
    entry is skipped.  Fields are resolved via self.key_mapping, and
    nested IPv4/IPv6 address objects are attached when address data is
    present.  Raises rhnFault(53) on malformed input.
    """
    log_debug(4, dict)
    self.ifaces = {}
    self.db_ifaces = []
    # parameters which are not allowed to be empty and set to NULL
    self._autonull = ('hw_addr', 'module')
    if not dict:
        return
    for name, info in dict.items():
        if name == 'class':
            # Ignore it
            continue
        if not isinstance(info, type({})):
            raise rhnFault(53, "Unexpected format for interface %s" % name)
        vdict = {}
        for key, mapping in self.key_mapping.items():
            # Look at the mapping first; if not found, look for the key
            if info.has_key(mapping):
                k = mapping
            else:
                k = key
            if not info.has_key(k):
                raise rhnFault(53, "Unable to find required field %s" % key)
            val = info[k]
            vdict[mapping] = val
        if 'ipaddr' in info and info['ipaddr']:
            vdict['ipv4'] = NetIfaceAddress4(
                [{'ipaddr': info['ipaddr'], 'broadcast': info['broadcast'],
                  'netmask': info['netmask']}])
        if 'ipv6' in info and info['ipv6']:
            vdict['ipv6'] = NetIfaceAddress6(info["ipv6"])
        self.ifaces[name] = vdict
def __init__(self):
    """Register every XML-RPC handler name exported by this class."""
    log_debug(3)
    RPC_Base.__init__(self)
    exported = (
        'uploadPackageInfo',
        'uploadPackageInfoBySession',
        'uploadSourcePackageInfo',
        'uploadSourcePackageInfoBySession',
        'listChannel',
        'listChannelBySession',
        'listChannelChecksum',
        'listChannelChecksumBySession',
        'listChannelSource',
        'listChannelSourceBySession',
        'listMissingSourcePackages',
        'listMissingSourcePackagesBySession',
        'channelPackageSubscription',
        'channelPackageSubscriptionBySession',
        'no_op',
        'test_login',
        'test_new_login',
        'test_check_session',
        'login',
        'check_session',
        'getPackageChecksum',
        'getPackageChecksumBySession',
        'getSourcePackageChecksum',
        'getSourcePackageChecksumBySession',
        # old MD5 compatibility functions
        'getPackageMD5sum',
        'getPackageMD5sumBySession',
        'getSourcePackageMD5sum',
        'getSourcePackageMD5sumBySession',
    )
    for handler in exported:
        self.functions.append(handler)
def _getSourcePackageChecksum(self, org_id, pkg_infos):
    """ Gives checksum info of available source packages.
        Also does an existance check on the filesystem.
    """
    log_debug(3)
    # A source package is visible if it belongs to the caller's org or to
    # the null (public) org.
    statement = """
        select
            ps.path path,
            c.checksum,
            c.checksum_type
        from
            rhnSourceRpm sr,
            rhnPackageSource ps,
            rhnChecksumView c
        where
            sr.name = :name
            and ps.source_rpm_id = sr.id
            and ( ps.org_id = :orgid or
                  ( ps.org_id is null and :orgid is null ) )
            and ps.checksum_id = c.id
    """
    h = rhnSQL.prepare(statement)
    row_list = {}
    # one checksum lookup per requested source package name
    for pkg in pkg_infos.keys():
        row_list[pkg] = self._get_package_checksum(h,
                                                   {'name': pkg, 'orgid': org_id})
    return row_list
def test_login(self, username, password):
    """Return 1 if username/password authenticate successfully, 0 otherwise.

    Any authentication failure is reported as a plain 0 so the caller
    never sees an exception.
    """
    log_debug(5, username)
    try:
        authobj = auth(username, password)
    except Exception:
        # Narrowed from a bare `except:` so that SystemExit and
        # KeyboardInterrupt propagate instead of being swallowed.
        return 0
    return 1
def getAnyChecksum(self, info, username=None, password=None, session=None, is_source=0):
    """ returns checksum info of available packages
        also does an existance check on the filesystem.

        Authenticates either with username/password or with a session
        string; raises rhnFault(33)/(34) for invalid/expired sessions.
    """
    log_debug(3)
    pkg_infos = info.get('packages')
    channels = info.get('channels', [])
    force = info.get('force', 0)
    orgid = info.get('org_id')
    # the literal string 'null' selects the public (null) org
    if orgid == 'null':
        null_org = 1
    else:
        null_org = None
    if not session:
        # classic username/password authentication
        org_id, force = rhnPackageUpload.authenticate(username, password,
                                                      channels=channels,
                                                      null_org=null_org,
                                                      force=force)
    else:
        try:
            org_id, force = rhnPackageUpload.authenticate_session(
                session, channels=channels, null_org=null_org, force=force)
        except rhnSession.InvalidSessionError:
            raise_with_tb(rhnFault(33), sys.exc_info()[2])
        except rhnSession.ExpiredSessionError:
            raise_with_tb(rhnFault(34), sys.exc_info()[2])
    if is_source:
        ret = self._getSourcePackageChecksum(org_id, pkg_infos)
    else:
        ret = self._getPackageChecksum(org_id, pkg_infos)
    return ret
def token_server_groups(server_id, tokens_obj): """ Handle server group subscriptions for the registration token """ assert(isinstance(tokens_obj, ActivationTokens)) h = rhnSQL.prepare(_query_token_server_groups) server_groups = {} for token in tokens_obj.tokens: token_id = token['token_id'] h.execute(token_id=token_id) while 1: row = h.fetchone_dict() if not row: break server_group_id = row['server_group_id'] server_groups[server_group_id] = row # Now try to subscribe server to group ret = [] for server_group_id, sg in server_groups.items(): log_debug(4, "token server group", sg) try: join_server_group(server_id, server_group_id) except rhnSQL.SQLError, e: log_error("Failed to add server to group", server_id, server_group_id, sg["name"]) raise rhnFault(80, _("Failed to add server to group %s") % sg["name"]), None, sys.exc_info()[2] else: ret.append("Subscribed to server group '%s'" % sg["name"])
def validate_new_password(password):
    """ Perform all the checks required for new passwords

        Raises rhnFault(12/14/701/15) for empty, too-short, too-long,
        all-asterisk or non-printable passwords respectively.
    """
    log_debug(3, "Entered validate_new_password")
    #
    # We're copying the code because we don't want to
    # invalidate any of the existing passwords.
    #
    # Validate password based on configurable length
    # regular expression
    if not password:
        raise rhnFault(12)
    if len(password) < CFG.MIN_PASSWD_LEN:
        raise rhnFault(14, _("password must be at least %d characters")
                       % CFG.MIN_PASSWD_LEN)
    if len(password) > CFG.MAX_PASSWD_LEN:
        raise rhnFault(701, _("Password must be shorter than %d characters")
                       % CFG.MAX_PASSWD_LEN)
    # truncate before the character-class checks
    password = password[:CFG.MAX_PASSWD_LEN]
    # any character NOT in this set is rejected below
    invalid_re = re.compile(
        r"[^ A-Za-z0-9`!@#$%^&*()-_=+[{\]}\\|;:'\",<.>/?~]")
    asterisks_re = re.compile(r"^\**$")
    # make sure the password isn't all *'s
    tmp = asterisks_re.match(password)
    if tmp is not None:
        raise rhnFault(15, "password cannot be all asterisks '*'")
    # make sure we have only printable characters
    tmp = invalid_re.search(password)
    if tmp is not None:
        # regs[0] is the (start, end) span of the first invalid character
        pos = tmp.regs[0]
        raise rhnFault(15, _("password contains character `%s'")
                       % password[pos[1] - 1])
def _cacheObj(self, fileName, version, dataProducer, params=None):
    """ The real workhorse for all flavors of listall.

        Returns the cached string for (fileName, version) if the cache
        file is readable; otherwise calls dataProducer(*params) to build
        the data, caches it, and returns it.
    """
    log_debug(4, fileName, version, params)
    cache_dir = self._getPkgListDir()
    cache_path = "%s/%s-%s" % (cache_dir, fileName, version)
    if os.access(cache_path, os.R_OK):
        # Cache hit: slurp the whole file
        f = open(cache_path, "r")
        try:
            return f.read()
        finally:
            f.close()
    # Cache miss: ask the data producer (DB query or similar)
    if params is None:
        params = ()
    produced = dataProducer(*params)
    # Cache the result for the next caller
    cache(produced, cache_dir, fileName, version)
    return produced
def update(server_id, action_id, data={}):
    # NOTE(review): mutable default `data={}` -- harmless here since data is
    # never read or mutated, but a `data=None` default would be safer.
    log_debug(3, server_id, action_id)
    action_status = rhnFlags.get('action_status')
    if action_status == 3:
        # Action failed
        kickstart_state = 'failed'
        next_action_type = None
    else:
        kickstart_state = 'deployed'
        # This is horrendous, but in order to fix it I would have to change
        # almost all of the actions code, which we don't have time to do
        # for the 500 beta. --wregglej
        try:
            ks_session_type = server_kickstart.get_kickstart_session_type(server_id, action_id)
        except rhnException, re:
            # NOTE(review): this binding shadows the `re` module inside the
            # handler -- confirm nothing below relies on the module name.
            ks_session_type = None
        if ks_session_type is None:
            next_action_type = "None"
        elif ks_session_type == 'para_guest':
            next_action_type = 'kickstart_guest.initiate'
        else:
            next_action_type = 'kickstart.initiate'
    # NOTE(review): kickstart_state / next_action_type are computed but not
    # used or returned in the visible code -- the tail of this function may
    # have been lost; verify against upstream before relying on it.
def getPackagePath(self, pkgFilename, redirect_capable=0):
    """ Retrieves package path
        Overloads getPackagePath in common/rhnRepository.
        checks if redirect and hosted;
        makes a call to query the db for pkg_location

        Returns None after setting self.redirect_location when the client
        should be redirected; otherwise returns the local file path.
    """
    log_debug(2, pkgFilename, redirect_capable)
    # check for re-direct check flag from header to issue package
    # request from client in order to avoid failover loops.
    skip_redirect = rhnFlags.get('x-rhn-redirect')
    log_debug(3, "check flag for X-RHN-REDIRECT ::", skip_redirect)
    # get the redirect and local paths
    remotepath, localpath = self.getAllPackagePaths(pkgFilename)
    # check for redirect conditions and fail over checks
    if redirect_capable and not CFG.SATELLITE and not skip_redirect \
            and remotepath is not None:
        self.redirect_location = remotepath
        # We've set self.redirect_location, we're done here
        # we throw a redirectException in _getFile method.
        return None
    # Package cannot be served from the edge, we serve it ourselves
    return localpath
def getSourcePackagePath(self, pkgFilename):
    """ OVERLOADS getSourcePackagePath in common/rhnRepository.
        snag src.rpm and nosrc.rpm from local repo, after ensuring
        we are authorized to fetch it.
    """
    log_debug(3, pkgFilename)
    # only .src.rpm / .nosrc.rpm files are valid source package requests
    if pkgFilename[-8:] != '.src.rpm' and pkgFilename[-10:] != '.nosrc.rpm':
        raise rhnFault(17, _("Invalid SRPM package requested: %s")
                       % pkgFilename)
    # Connect to the server to get an authorization for downloading this
    # package
    server = rpclib.Server(self.rhnParentXMLRPC, proxy=self.httpProxy,
                           username=self.httpProxyUsername,
                           password=self.httpProxyPassword)
    if self.caChain:
        server.add_trusted_cert(self.caChain)
    try:
        retval = server.proxy.package_source_in_channel(
            pkgFilename, self.channelName, self.clientInfo)
    except xmlrpclib.Fault, e:
        # re-raise as rhnFault, preserving the original traceback
        raise rhnFault(1000, _("Error retrieving source package: %s")
                       % str(e)), None, sys.exc_info()[2]
    # NOTE(review): retval is unused and nothing is returned here -- the
    # tail of this method (actually serving the file after authorization)
    # may have been lost; verify against upstream.
def _repodata_python(self, file_name):
    """Generate and serve a repodata file for this channel in-process.

    Looks the requested file up in a dispatch table; unknown names raise
    rhnFault(6).  Sets the Content-Type transport flag and returns the
    data wrapped in an rpclib transport File.
    """
    log_debug(3, 'repodata', file_name)
    c_info = rhnChannel.channel_info(self.channelName)
    repo = repository.get_repository(c_info)
    # known repodata files -> (repository getter name, content type)
    dispatch = {
        "repomd.xml": ("get_repomd_file", "text/xml"),
        "primary.xml.gz": ("get_primary_xml_file", "application/x-gzip"),
        "other.xml.gz": ("get_other_xml_file", "application/x-gzip"),
        "filelists.xml.gz": ("get_filelists_xml_file", "application/x-gzip"),
        "updateinfo.xml.gz": ("get_updateinfo_xml_file", "application/x-gzip"),
        "comps.xml": ("get_comps_file", "text/xml"),
    }
    if file_name not in dispatch:
        log_debug(2, "Unknown repomd file requested: %s" % file_name)
        raise rhnFault(6)
    getter_name, content_type = dispatch[file_name]
    output = getattr(repo, getter_name)()
    output = rpclib.transports.File(output, name=file_name)
    rhnFlags.set('Content-Type', content_type)
    return output
def _repodata_taskomatic(self, file_name):
    """Serve a taskomatic-generated repodata file from the on-disk cache.

    Unknown file names raise rhnFault(6); a missing cache file queues a
    regeneration request and also returns 404 via rhnFault(6).
    """
    log_debug(3, 'repodata', file_name)
    content_type = "application/x-gzip"
    if file_name in ["repomd.xml", "comps.xml"]:
        content_type = "text/xml"
    elif file_name not in ["primary.xml.gz", "other.xml.gz",
                           "filelists.xml.gz", "updateinfo.xml.gz",
                           "Packages.gz"]:
        log_debug(2, "Unknown repomd file requested: %s" % file_name)
        raise rhnFault(6)
    # XXX this won't be repconned or CDNd
    # comps.xml is not cached on disk; generate it in-process
    if file_name == "comps.xml":
        return self._repodata_python(file_name)
    file_path = "%s/%s/%s" % (CFG.REPOMD_PATH_PREFIX, self.channelName, file_name)
    rhnFlags.set('Content-Type', content_type)
    try:
        rhnFlags.set('Download-Accelerator-Path', file_path)
        return self._getFile(CFG.REPOMD_CACHE_MOUNT_POINT + "/" + file_path)
    except IOError, e:
        # For file not found, queue up a regen, and return 404
        if e.errno == 2 and file_name != "comps.xml":
            taskomatic.add_to_repodata_queue(self.channelName,
                                             "repodata request", file_name,
                                             bypass_filters=True)
            rhnSQL.commit()
            # This returns 404 to the client
            raise rhnFault(6), None, sys.exc_info()[2]
        raise
def _future_actions_enabled(self):
    """ Returns true if staging content is enabled for this system """
    cursor = rhnSQL.prepare(self._query_future_enabled)
    cursor.execute(server_id=self.server_id)
    result = cursor.fetchone_dict()
    log_debug(4, result["staging_content_enabled"])
    return result["staging_content_enabled"] == "Y"
def __getV2(self, action, dry_run=0):
    """ Fetches queued actions for the clients version 2+.

        Resolves the action's handler method, invokes it and wraps the
        result into an XML-RPC blob; raises EmptyAction if no handler is
        available.
    """
    log_debug(3, self.server_id)
    # Get the root dir of this install
    try:
        method = getMethod.getMethod(action['method'], 'server.action')
    except getMethod.GetMethodException:
        Traceback("queue.get V2")
        # preserve the original traceback while raising EmptyAction
        raise EmptyAction("Could not get a valid method for %s" % (
            action['method'],)), None, sys.exc_info()[2]
    # Call the method
    result = method(self.server_id, action['id'], dry_run)
    if result is None:
        # None are mapped to the empty list
        result = ()
    elif not isinstance(result, TupleType):
        # Everything other than a tuple is wrapped in a tuple
        result = (result, )
    xmlblob = xmlrpclib.dumps(result, methodname=action['method'])
    log_debug(5, "returning xmlblob for action", xmlblob)
    return {
        'id': action['id'],
        'action': xmlblob,
        'version': action['version'],
    }
def _delete_files(relpaths):
    """Unlink package files (relative to CFG.MOUNT_POINT); log and skip
    any path that does not exist."""
    for rel in relpaths:
        full_path = os.path.join(CFG.MOUNT_POINT, rel)
        if os.path.exists(full_path):
            unlink_package_file(full_path)
        else:
            log_debug(1, "Not removing %s: no such file" % full_path)
def _delete_ks_files(channel_labels):
    # Remove kickstartable-tree directories belonging to the given channels,
    # unless the same base path (relative to the mount point) is still
    # referenced by a channel outside the list.
    sql = """
        select kt.base_path
        from rhnChannel c
        join rhnKickstartableTree kt on c.id = kt.channel_id
        where c.label in (%s)
          and not exists (
                select 1
                from rhnKickstartableTree ktx
                join rhnChannel cx on cx.id = ktx.channel_id
                where replace(ktx.base_path, :mnt_point, '') =
                      replace(kt.base_path, :mnt_point, '')
                  and cx.label not in (%s))
    """
    params, bind_params = _bind_many(channel_labels)
    params['mnt_point'] = CFG.MOUNT_POINT + '/'
    bind_params = ', '.join(bind_params)
    # same label list is bound twice: once for the delete set, once for
    # the "still referenced elsewhere" exclusion
    h = rhnSQL.prepare(sql % (bind_params, bind_params))
    h.execute(**params)
    kickstart_list = h.fetchall_dict() or []
    for kickstart in kickstart_list:
        path = os.path.join(CFG.MOUNT_POINT, str(kickstart['base_path']))
        if not os.path.exists(path):
            log_debug(1, "Not removing %s: no such file" % path)
            continue
        shutil.rmtree(path)
def reload(self, server, reload_all=0):
    """Reload this object's state from the database for the given server id.

    Returns 0 on success, -1 if a package/hardware reload fails; raises
    rhnFault(29) when the server record does not exist.
    """
    log_debug(4, server, "reload_all = %d" % reload_all)
    if not self.server.load(int(server)):
        log_error("Could not find server record for reload", server)
        raise rhnFault(29, "Could not find server record in the database")
    # any cached client certificate is stale after a reload
    self.cert = None
    # it is lame that we have to do this
    h = rhnSQL.prepare("""
    select label from rhnServerArch where id = :archid
    """)
    h.execute(archid=self.server["server_arch_id"])
    data = h.fetchone_dict()
    if not data:
        raise rhnException("Found server with invalid numeric "
                           "architecture reference",
                           self.server.data)
    self.archname = data['label']
    # we don't know this one anymore (well, we could look for, but
    # why would we do that?)
    self.user = None
    # XXX: Fix me
    if reload_all:
        if not self.reload_packages_byid(self.server["id"]) == 0:
            return -1
        if not self.reload_hardware_byid(self.server["id"]) == 0:
            return -1
    return 0
def _execute_wrapper(self, function, *p, **kw):
    """Run a cursor operation, translating psycopg2 errors into the
    driver-neutral sql_base exception hierarchy.

    Blob bind variables listed in self.blob_map are wrapped in BufferType
    before the call.
    """
    params = ','.join(["%s: %s" % (key, value) for key, value in list(kw.items())])
    log_debug(5, "Executing SQL: \"%s\" with bind params: {%s}"
              % (self.sql, params))
    if self.sql is None:
        raise rhnException("Cannot execute empty cursor")
    if self.blob_map:
        for blob_var in list(self.blob_map.keys()):
            kw[blob_var] = BufferType(kw[blob_var])
    try:
        retval = function(*p, **kw)
    except psycopg2.InternalError:
        e = sys.exc_info()[1]
        # schema errors carry a "-NNNNN" code in pgerror; default 99999
        error_code = 99999
        m = re.match('ERROR: +-([0-9]+)', e.pgerror)
        if m:
            error_code = int(m.group(1))
        raise sql_base.SQLSchemaError(error_code, e.pgerror, e)
    except psycopg2.ProgrammingError:
        e = sys.exc_info()[1]
        raise sql_base.SQLStatementPrepareError(self.dbh, e.pgerror, self.sql)
    except KeyError:
        # a bind variable referenced by the statement was not supplied
        e = sys.exc_info()[1]
        raise sql_base.SQLError("Unable to bound the following variable(s): %s"
                                % (string.join(e.args, " ")))
    return retval
def __processPackage(package, org_id, channels, source):
    """Normalize an uploaded package-info dict and build a package object.

    Handles the legacy 'md5sum' field, validates required fields and
    returns the object produced by createPackage().  Raises rhnFault(50)
    on missing/invalid data.
    """
    log_debug(4, org_id, channels, source)
    if 'md5sum' in package:  # for old rhnpush compatibility
        package['checksum_type'] = 'md5'
        package['checksum'] = package['md5sum']
        del(package['md5sum'])
    if 'checksum' not in package:
        raise rhnFault(50, "The package's checksum digest has not been specified")
    if 'packageSize' not in package:
        raise rhnFault(50, "The package size has not been specified")
    header = rhn_rpm.headerLoad(package['header'].data)
    if not header:
        raise rhnFault(50)
    packageSize = package['packageSize']
    relpath = package.get('relativePath')
    if 'header_start' in package:
        header_start = package['header_start']
    else:
        header_start = 0
    if 'header_end' in package:
        header_end = package['header_end']
    else:
        # Just say the whole package
        header_end = packageSize
    checksum_type = package['checksum_type']
    checksum = package['checksum']
    p = createPackage(header, packageSize, checksum_type, checksum, relpath,
                      org_id, header_start, header_end, channels)
    return p
def process(self): log_debug(3) # nice thing that req has a read() method, so it makes it look just # like an fd try: fd = self.input.decode(self.req) except IOError: # client timed out return apache.HTTP_BAD_REQUEST # Read the data from the request _body = fd.read() fd.close() # In this case, we talk to a client (maybe through a proxy) # make sure we have something to decode if _body is None or len(_body) == 0: return apache.HTTP_BAD_REQUEST # Decode the request; avoid logging crappy responses try: params, method = self.decode(_body) except xmlrpclib.ResponseError: log_error("Got bad XML-RPC blob of len = %d" % len(_body)) return apache.HTTP_BAD_REQUEST else: if params is None: params = () # make the actual function call and return the result return self.call_function(method, params)
def _store_file(self, action_id, scap_file):
    """Persist an uploaded (base64-encoded) SCAP result file on the filer.

    Raises rhnFault(5102/5103/5104) for a bad directory path, bad file
    path, or unsupported content encoding respectively.
    """
    r_dir = get_action_path(self.server.server['org_id'],
                            self.server_id, action_id)
    if not r_dir:
        log_debug(1, self.server_id, "Error composing SCAP action directory path")
        raise rhnFault(5102)
    r_file = get_actionfile_path(self.server.server['org_id'],
                                 self.server_id, action_id,
                                 scap_file['filename'])
    if not r_file:
        log_debug(1, self.server_id, "Error composing SCAP action file path")
        raise rhnFault(5103)
    if not scap_file['content-encoding'] == 'base64':
        log_debug(1, self.server_id, "Invalid content encoding: %s"
                  % scap_file['content-encoding'])
        raise rhnFault(5104)
    # Create the file on filer
    filecontent = decodestring(scap_file['filecontent'])
    # TODO assert for the size of the file
    absolute_dir = os.path.join(CFG.MOUNT_POINT, r_dir)
    absolute_file = os.path.join(absolute_dir, scap_file['filename'])
    if not os.path.exists(absolute_dir):
        log_debug(1, self.server_id, "Creating action directory: %s"
                  % absolute_dir)
        os.makedirs(absolute_dir)
        # world-readable dir so the web UI can serve the results back
        mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
        os.chmod(absolute_dir, mode)
        os.chmod(os.path.dirname(os.path.normpath(absolute_dir)), mode)
    log_debug(1, self.server_id, "Creating file: %s" % absolute_file)
    f = open(absolute_file, 'w+')
    f.write(filecontent)
    return {'result': True, }
def schedulePoller(server_id, action_id, dry_run=0):
    """Fetch the cron-style schedule for a poller action.

    Returns the (minute, hour, dom, month, dow) tuple; raises
    InvalidAction when the action row or any schedule field is missing.
    """
    log_debug(3, dry_run)
    cursor = rhnSQL.prepare(_query_schedulePoller)
    cursor.execute(action_id=action_id)
    row = cursor.fetchone_dict()
    if not row:
        raise InvalidAction("No schedulePoller actions found.")
    # every schedule component must be present in the row
    required_fields = (
        ('minute', "minute"),
        ('hour', "hour"),
        ('dom', "day of the month"),
        ('month', "month"),
        ('dow', "day of the week"),
    )
    for field, description in required_fields:
        if not row.has_key(field):
            raise InvalidAction("schedulePoller action %s has no %s associated with it."
                                % (str(action_id), description))
    return (row['minute'], row['hour'], row['dom'], row['month'], row['dow'])
def __call__(self, *args):
    """Invoke the stored function/procedure with positional arguments.

    Builds a `SELECT name(%s, %s, ...)` statement with one placeholder
    per argument.  psycopg2 errors are translated into SQLSchemaError;
    returns the cursor result, or the first column of the first row when
    self.ret_type is set.
    """
    log_debug(2, self.name, args)
    # One "%s" placeholder per positional argument (the original built
    # this with a manual loop and a dead counter variable).
    positional_args = ", ".join(["%s"] * len(args))
    query = "SELECT %s(%s)" % (self.name, positional_args)
    log_debug(2, query, args)
    try:
        ret = self.cursor.execute(query, args)
    except psycopg2.Error:
        e = sys.exc_info()[1]
        # schema errors carry a "-NNNNN" code in pgerror; default 99999
        error_code = 99999
        m = re.match('ERROR: +-([0-9]+)', e.pgerror)
        if m:
            error_code = int(m.group(1))
        raise sql_base.SQLSchemaError(error_code, e.pgerror, e)
    if self.ret_type is None:
        return ret
    else:
        return self.cursor.fetchone()[0]
def initiate(server_id, action_id, dry_run=0):
    """Collect the parameters needed to initiate a guest kickstart.

    Returns the tuple consumed by the client-side kickstart code; raises
    InvalidAction when no kickstart row is associated with the action.
    """
    log_debug(3)
    cursor = rhnSQL.prepare(_query_initiate_guest)
    cursor.execute(action_id=action_id)
    details = cursor.fetchone_dict()
    if not details:
        raise InvalidAction("Kickstart action without an associated kickstart")
    host = details['kickstart_host']
    virt_type = details['virt_type']
    guest_name = details['guest_name']
    # koan-based deployments always use this boot image
    boot_image = "spacewalk-koan"
    append_string = details['append_string']
    vcpus = details['vcpus']
    disk_gb = details['disk_gb']
    mem_kb = details['mem_kb']
    session_id = details['ks_session_id']
    bridge = details['virt_bridge']
    disk_path = details['disk_path']
    system_name = details['cobbler_system_name']
    if not boot_image:
        raise InvalidAction("Boot image missing")
    return (host, system_name, virt_type, session_id, guest_name,
            mem_kb, vcpus, disk_gb, bridge, disk_path, append_string)
def _delete_rpm_group(packageIds):
    # Tables holding foreign-key references to rhnPackage; the referencing
    # rows must be removed before the package rows themselves.
    references = [
        'rhnChannelPackage',
        'rhnErrataPackage',
        'rhnErrataPackageTMP',
        'rhnPackageChangelogRec',
        'rhnPackageConflicts',
        'rhnPackageFile',
        'rhnPackageObsoletes',
        'rhnPackageProvides',
        'rhnPackageRequires',
        'rhnPackageRecommends',
        'rhnPackageSuggests',
        'rhnPackageSupplements',
        'rhnPackageEnhances',
        'rhnPackageBreaks',
        'rhnPackagePredepends',
        'rhnServerNeededCache',
    ]
    deleteStatement = "delete from %s where package_id = :package_id"
    for table in references:
        h = rhnSQL.prepare(deleteStatement % table)
        count = h.executemany(package_id=packageIds)
        log_debug(3, "Deleted from %s: %d rows" % (table, count))
    # finally drop the package rows themselves
    deleteStatement = "delete from rhnPackage where id = :package_id"
    h = rhnSQL.prepare(deleteStatement)
    count = h.executemany(package_id=packageIds)
    if count:
        log_debug(2, "DELETED package id %s" % str(packageIds))
    else:
        log_error("No such package id %s" % str(packageIds))
    rhnSQL.commit()
def set_secret(self, secret):
    """ set the secret of the entry and recompute the checksum """
    log_debug(4, "secret", secret)
    # the checksum is derived from the new secret value
    self.__checksum = self.compute_checksum(secret)
    self.__secret = secret
def client_get_delimiters(self, systemid):
    """Authenticate the calling system, then return the config delimiters."""
    log_debug(1)
    self.auth_system(systemid)
    delimiters = self._get_delimiters()
    return delimiters
def client_get_maximum_file_size(self, systemid):
    """Authenticate the calling system, then return the max config file size."""
    log_debug(1)
    self.auth_system(systemid)
    max_size = self._get_maximum_file_size()
    return max_size
def check_entitlement(self):
    """Return entitlement info for this server, or None if it has no id yet."""
    if not self.server.has_key("id"):
        return None
    sid = self.server["id"]
    log_debug(3, sid)
    return server_lib.check_entitlement(sid)
def __save(self, channel):
    """Persist this server: update an existing record or create a new one,
    then handle channel subscription, entitlement, group membership and
    the extra profile data.  Returns 0 on success.
    """
    tokens_obj = rhnFlags.get("registration_token")
    if self.server.real:
        # existing server: just update the row
        server_id = self.server["id"]
        self.server.save()
    else:
        # create new entry
        self.gen_secret()
        server_id = self.getid()
        org_id = self.server["org_id"]
        if self.user:
            user_id = self.user.getid()
        else:
            user_id = None
        # some more default values
        self.server["auto_update"] = "N"
        if self.user and not self.server.has_key("creator_id"):
            # save the link to the user that created it if we have
            # that information
            self.server["creator_id"] = self.user.getid()
        # and create the server entry
        self.server.create(server_id)
        server_lib.create_server_setup(server_id, org_id)
        self.handle_virtual_guest()
        # if we're using a token, then the following channel
        # subscription request can allow no matches since the
        # token code will fix up or fail miserably later.
        # subscribe the server to applicable channels
        # bretm 02/17/2007 -- TODO: refactor activation key codepaths
        # to allow us to not have to pass in none_ok=1 in any case
        #
        # This can now throw exceptions which will be caught at a higher level
        if channel is not None:
            channel_info = dict(rhnChannel.channel_info(channel))
            log_debug(4, "eus channel id %s" % str(channel_info))
            rhnChannel.subscribe_sql(server_id, channel_info['id'])
        else:
            rhnChannel.subscribe_server_channels(self,
                                                 none_ok=tokens_obj,
                                                 user_id=user_id)
        if not tokens_obj:
            # Attempt to auto-entitle, can throw the following exceptions:
            #   rhnSystemEntitlementException
            #   rhnNoSystemEntitlementsException
            self.autoentitle()
            # If a new server that was registered by an user (i.e. not
            # with a registration token), look for this user's default
            # groups
            self.join_groups()
        server_lib.join_rhn(org_id)
    # Update virtual guest attributes on re-registration
    if getattr(tokens_obj, "is_rereg_token", False):
        self.handle_virtual_guest()
    # Update the uuid - but don't commit yet
    self.update_uuid(self.uuid, commit=0)
    self.create_perm_cache()
    # And save the extra profile data...
    self.save_suse_products_byid(server_id)
    self.save_packages_byid(server_id, schedule=1)
    self.save_hardware_byid(server_id)
    self.save_history_byid(server_id)
    return 0
def create_perm_cache(self):
    """Rebuild the permission cache for this server via the
    rhn_cache.update_perms_for_server stored procedure."""
    log_debug(4)
    update_perms = rhnSQL.Procedure("rhn_cache.update_perms_for_server")
    update_perms(self.server['id'])
def change_base_channel(self, new_rel, suse_products=None):
    """Re-guess and update the server's channel subscriptions after an OS
    release change (old release -> new_rel).  Custom base channels are
    left untouched (bz 442355).  Returns 1; a history entry is always
    written.
    """
    log_debug(3, self.server["id"], new_rel)
    old_rel = self.server["release"]
    current_channels = rhnChannel.channels_for_server(self.server["id"])
    # Extract the base channel off of
    old_base = [x for x in current_channels if not x['parent_channel']]
    # Quick sanity check
    base_channels_count = len(old_base)
    if base_channels_count == 1:
        old_base = old_base[0]
    elif base_channels_count == 0:
        old_base = None
    else:
        raise rhnException(
            "Server %s subscribed to multiple base channels"
            % (self.server["id"], ))
    # bz 442355
    # Leave custom base channels alone, don't alter any of the channel subscriptions
    if not CFG.RESET_BASE_CHANNEL and old_base and rhnChannel.isCustomChannel(
            old_base["id"]):
        log_debug(
            3,
            "Custom base channel detected, will not alter channel subscriptions"
        )
        self.server["release"] = new_rel
        self.server.save()
        msg = """The SUSE Manager Update Agent has detected a change in the base version of the operating system running on your system, additionally you are subscribed to a custom channel as your base channel. Due to this configuration your channel subscriptions will not be altered. """
        self.add_history(
            "Updated system release from %s to %s" % (old_rel, new_rel), msg)
        self.save_history_byid(self.server["id"])
        return 1
    # Build a lightweight server stand-in for channel guessing
    s = rhnChannel.LiteServer().init_from_server(self)
    s.release = new_rel
    s.arch = self.archname
    if suse_products:
        s.suse_products = suse_products
    # Let get_server_channels deal with the errors and raise rhnFault
    target_channels = rhnChannel.guess_channels_for_server(s, none_ok=True)
    if target_channels:
        target_base = filter(lambda x: not x['parent_channel'],
                             target_channels)[0]
    else:
        target_base = None
    channels_to_subscribe = []
    channels_to_unsubscribe = []
    if old_base and target_base and old_base['id'] == target_base['id']:
        # Same base channel. Preserve the currently subscribed child
        # channels, just add the ones that are missing
        hash = {}
        for c in current_channels:
            hash[c['id']] = c
        for c in target_channels:
            channel_id = c['id']
            if channel_id in hash:
                # Already subscribed to this one
                del hash[channel_id]
                continue
            # Have to subscribe to this one
            channels_to_subscribe.append(c)
        # We don't want to lose subscriptions to prior channels, so don't
        # do anything with hash.values()
    else:
        # Different base channel
        channels_to_unsubscribe = current_channels
        channels_to_subscribe = target_channels
    rhnSQL.transaction("change_base_channel")
    self.server["release"] = new_rel
    self.server.save()
    if not (channels_to_subscribe or channels_to_unsubscribe):
        # Nothing to do, just add the history entry
        self.add_history("Updated system release from %s to %s"
                         % (old_rel, new_rel))
        self.save_history_byid(self.server["id"])
        return 1
    # XXX: need a way to preserve existing subscriptions to
    # families so we can restore access to non-public ones.
    rhnChannel.unsubscribe_channels(self.server["id"],
                                    channels_to_unsubscribe)
    rhnChannel.subscribe_channels(self.server["id"],
                                  channels_to_subscribe)
    # now that we changed, recompute the errata cache for this one
    rhnSQL.Procedure("queue_server")(self.server["id"])
    # Make a history note
    sub_channels = rhnChannel.channels_for_server(self.server["id"])
    if sub_channels:
        channel_list = [a["name"] for a in sub_channels]
        msg = """The SUSE Manager Update Agent has detected a change in the base version of the operating system running on your system and has updated your channel subscriptions to reflect that. Your server has been automatically subscribed to the following channels:\n%s\n""" % (string.join(channel_list, "\n"), )
    else:
        msg = """*** ERROR: *** While trying to subscribe this server to software channels: There are no channels serving release %s""" % new_rel
    self.add_history(
        "Updated system release from %s to %s" % (old_rel, new_rel), msg)
    self.save_history_byid(self.server["id"])
    return 1
def package_source_in_channel(self, package, channel, auth_token):
    """ Validates the client request for a source package download """
    log_debug(3, package, channel)
    # auth_client() raises on failure; its return value was previously
    # bound to an unused local, so just call it for the side effect.
    self.auth_client(auth_token)
    return rhnPackage.package_source_in_channel(self.server_id, package,
                                                channel)
def update_client_capabilities(server_id):
    """Synchronize the rhnClientCapability rows for a server with the
    capabilities the client reported in this request.

    Computes a three-way diff (update / delete / insert) between the
    capabilities currently stored in the DB and the ones reported via
    get_client_capabilities(), then applies each set with executemany.
    Commits at the end.
    """
    caps = get_client_capabilities()

    if caps is None:
        caps = {}
    # Work on a copy: entries are deleted from it as DB rows are matched,
    # leaving only the capabilities that need inserting.
    caps = caps.copy()

    h = rhnSQL.prepare("""
        select cc.capability_name_id, ccn.name capability, cc.version
        from rhnClientCapability cc, rhnClientCapabilityName ccn
        where cc.server_id = :server_id
        and cc.capability_name_id = ccn.id
    """)

    # Column-oriented argument lists for executemany below.
    updates = {'server_id': [], 'capability_name_id': [], 'version': []}
    deletes = {'server_id': [], 'capability_name_id': []}
    inserts = {'server_id': [], 'capability': [], 'version': []}

    h.execute(server_id=server_id)
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        name = row['capability']
        version = row['version']
        capability_name_id = row['capability_name_id']

        if name in caps:
            local_ver = caps[name]['version']
            # Matched a DB row; whatever remains in caps afterwards is new.
            del caps[name]
            if local_ver == version:
                # Nothing to do - same version
                continue
            updates['server_id'].append(server_id)
            updates['capability_name_id'].append(capability_name_id)
            updates['version'].append(local_ver)
            continue
        # In the DB but no longer reported by the client - have to delete it
        deletes['server_id'].append(server_id)
        deletes['capability_name_id'].append(capability_name_id)

    # Everything else has to be inserted
    for name, hash in list(caps.items()):
        inserts['server_id'].append(server_id)
        inserts['capability'].append(name)
        inserts['version'].append(hash['version'])

    log_debug(5, "Deletes:", deletes)
    log_debug(5, "Updates:", updates)
    log_debug(5, "Inserts:", inserts)

    if deletes['server_id']:
        h = rhnSQL.prepare("""
            delete from rhnClientCapability
            where server_id = :server_id
            and capability_name_id = :capability_name_id
        """)
        h.executemany(**deletes)

    if updates['server_id']:
        h = rhnSQL.prepare("""
            update rhnClientCapability
            set version = :version
            where server_id = :server_id
            and capability_name_id = :capability_name_id
        """)
        h.executemany(**updates)

    if inserts['server_id']:
        h = rhnSQL.prepare("""
            insert into rhnClientCapability
            (server_id, capability_name_id, version)
            values (:server_id, LOOKUP_CLIENT_CAPABILITY(:capability), :version)
        """)
        h.executemany(**inserts)

    # Commit work. This can be dangerous if there is previously uncommited
    # work
    rhnSQL.commit()
def getKickstartSessionChannel(self, kickstart, session, system_id):
    """Return channel information for the given kickstart tree and session.

    The caller is authenticated first, so only a valid system/proxy can
    query session channel data.
    """
    log_debug(5, kickstart, session)
    # auth_system raises on failure, so the lookup below only runs for
    # authenticated callers.
    self.auth_system(system_id)
    channel_info = self.__getKickstartSessionChannel(kickstart, session)
    return channel_info
def runTransaction(server_id, action_id, dry_run=0):
    """Build the package transaction for an action: the list of
    (NVREA, operation-code) pairs the client's rpm transaction needs.

    Raises InvalidAction when the action has no package delta attached.
    Returns {'packages': [...]}.
    """
    log_debug(3, server_id, action_id, dry_run)

    # Resolve the package delta attached to this action.
    h = rhnSQL.prepare("""
        select package_delta_id
        from rhnActionPackageDelta
        where action_id = :action_id
    """)
    h.execute(action_id=action_id)
    row = h.fetchone_dict()
    if row is None:
        raise InvalidAction("invalid packages.runTransaction action %s for server %s" % (
            action_id, server_id))

    package_delta_id = row['package_delta_id']

    # Fetch every package element of the delta together with its operation.
    h = rhnSQL.prepare("""
        select tro.label as operation, pn.name, pe.version, pe.release, pe.epoch,
               pa.label as package_arch
          from rhnPackageDeltaElement pde,
               rhnTransactionPackage rp
          left join rhnPackageArch pa
            on rp.package_arch_id = pa.id,
               rhnTransactionOperation tro, rhnPackageName pn, rhnPackageEVR pe
         where pde.package_delta_id = :package_delta_id
           and pde.transaction_package_id = rp.id
           and rp.operation = tro.id
           and rp.name_id = pn.id
           and rp.evr_id = pe.id
         order by tro.label, pn.name
    """)
    h.execute(package_delta_id=package_delta_id)

    # Map DB operation labels to the single-letter codes the client/rpm
    # understands; anything else is unsupported and skipped.
    op_codes = {'insert': 'i', 'delete': 'e', 'upgrade': 'u'}

    result = []
    while True:
        row = h.fetchone_dict()
        if not row:
            break
        op = op_codes.get(row['operation'])
        if op is None:
            # Unsupported operation
            continue
        # The client cannot deal with null epochs or arches.
        epoch = row['epoch']
        if epoch is None:
            epoch = ''
        # The package arch can be null because of the outer join.
        arch = row['package_arch'] or ""
        result.append([
            [row['name'], row['version'], row['release'], epoch, arch],
            op])
    return {'packages': result}
def use_satellite(server_id, action_id, data=None):
    """Handle the 'use_satellite' action; currently only logs it.

    :param server_id: id of the server the action applies to (unused here)
    :param action_id: id of the action being processed
    :param data: optional extra action payload (unused here)

    The default for ``data`` was a mutable ``{}``; since the argument is
    never read, switching to ``None`` is behavior-compatible and avoids the
    shared-mutable-default pitfall.
    """
    log_debug(3, action_id)
def _push_file(self, config_channel_id, file):
    """Validate and store one pushed configuration entry (file, directory
    or symlink) as a new revision in the given config channel.

    Raises ConfigFilePathIncomplete, ConfigFileMissingDelimError,
    ConfigFileMissingInfoError or ConfigFileExceedsQuota on invalid input
    or quota exhaustion.
    """
    if not file:
        # Nothing to do
        return {}

    # Check for full path on the file
    path = file.get('path')
    if not (path[0] == os.sep):
        raise ConfigFilePathIncomplete(file)

    if 'config_file_type_id' not in file:
        log_debug(4, "Client does not support config directories, so set file_type_id to 1")
        file['config_file_type_id'] = '1'

    # Check if delimiters are present
    if self._is_file(file) and \
       not (file.get('delim_start') and file.get('delim_end')):
        # Need delimiters
        raise ConfigFileMissingDelimError(file)

    # Links may omit ownership/mode; everything else must provide them.
    if not (file.get('user') and file.get('group') and file.get('mode') is not None) and not self._is_link(file):
        raise ConfigFileMissingInfoError(file)

    # Oracle doesn't like certain binding variables
    file['username'] = file.get('user', '')
    file['groupname'] = file.get('group', '')
    file['file_mode'] = str(file.get('mode', ''))
    # if the selinux flag is not sent by the client it is set to the last file
    # revision (or to None (i.e. NULL) in case of first revision) - see the bug
    # 644985 - SELinux context cleared from RHEL4 rhncfg-client
    file['selinux_ctx'] = file.get('selinux_ctx', None)
    if not file['selinux_ctx']:
        # RHEL4 or RHEL5+ with disabled selinux - set from the last revision
        h = rhnSQL.prepare(self._query_current_selinux_lookup)
        h.execute(**file)
        row = h.fetchone_dict()
        if row:
            file['selinux_ctx'] = row['selinux_ctx']
        else:
            file['selinux_ctx'] = None

    result = {}
    try:
        if self._is_file(file):
            # Store the actual file contents; may raise on oversize files.
            self._push_contents(file)
        elif self._is_link(file):
            file['symlink'] = file.get('symlink') or ''
    except ConfigFileTooLargeError:
        result['file_too_large'] = 1

    t = rhnSQL.Table('rhnConfigFileState', 'label')
    state_id_alive = t['alive']['id']

    file['state_id'] = state_id_alive
    file['config_channel_id'] = config_channel_id

    try:
        self._push_config_file(file)
        self._push_revision(file)
    except rhnSQL.SQLSchemaError:
        e = sys.exc_info()[1]
        log_debug(4, "schema error", e)
        # blow away the contents that got inserted
        rhnSQL.rollback()
        if e.errno == 20267:
            # ORA-20267: (not_enough_quota) - Insufficient available quota
            # for the specified action
            raise ConfigFileExceedsQuota(file)
        raise
    # NOTE(review): `result` (which may carry 'file_too_large') is built above
    # but discarded here in favor of a fresh {} — looks like `return result`
    # may have been intended; confirm against callers before changing.
    return {}
def _store_file(self, action_id, scap_file):
    """Store an uploaded SCAP result file under the action's spool directory.

    :param action_id: id of the SCAP action this file belongs to
    :param scap_file: dict with 'filename', 'content-encoding' (must be
        'base64') and 'filecontent' (base64-encoded payload)
    :raises rhnFault: 5102/5103 when the spool paths cannot be composed,
        5104 on an unsupported content encoding
    :return: {'result': True} on success

    Fix: the output file is now written via a ``with`` block so the handle
    is always closed (the original leaked it on both success and error).
    """
    r_dir = get_action_path(self.server.server['org_id'], self.server_id, action_id)
    if not r_dir:
        log_debug(1, self.server_id, "Error composing SCAP action directory path")
        raise rhnFault(5102)
    r_file = get_actionfile_path(self.server.server['org_id'], self.server_id,
                                 action_id, scap_file['filename'])
    if not r_file:
        log_debug(1, self.server_id, "Error composing SCAP action file path")
        raise rhnFault(5103)

    if not scap_file['content-encoding'] == 'base64':
        log_debug(1, self.server_id,
                  "Invalid content encoding: %s" % scap_file['content-encoding'])
        raise rhnFault(5104)

    # Create the file on filer
    filecontent = decodestring(scap_file['filecontent'])
    # TODO assert for the size of the file

    absolute_dir = os.path.join(CFG.MOUNT_POINT, r_dir)
    absolute_file = os.path.join(absolute_dir, scap_file['filename'])

    if not os.path.exists(absolute_dir):
        log_debug(1, self.server_id, "Creating action directory: %s" % absolute_dir)
        os.makedirs(absolute_dir)
        mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
        subdirs = r_dir.split('/')
        www_gid = grp.getgrnam('www').gr_gid
        # Walk every ancestor of the new directory and open it up for the
        # web server (best effort: permission errors are ignored).
        for idx in range(1, len(subdirs)):
            subdir = os.path.join(CFG.MOUNT_POINT, *subdirs[0:idx])
            if os.path.isdir(subdir):
                try:
                    log_debug(1, "chmod 755", subdir)
                    os.chmod(subdir, mode)
                except OSError:
                    pass
                try:
                    log_debug(1, "chgrp www ", subdir)
                    os.chown(subdir, -1, www_gid)
                except OSError:
                    pass

    log_debug(1, self.server_id, "Creating file: %s" % absolute_file)
    # NOTE(review): mode 'w+' (text) kept as-is for compatibility; confirm
    # whether decodestring() yields bytes here (py3) and 'wb' is needed.
    with open(absolute_file, 'w+') as f:
        f.write(filecontent)
    return {
        'result': True,
    }
def save_packages_byid(self, sysid, schedule=1):
    """ save the package list

    Flushes the in-memory package profile to rhnServerPackage: bulk-deletes
    packages marked DELETED/UPDATED and bulk-inserts those marked
    ADDED/UPDATED. Optionally queues an errata-cache update and, for
    enterprise-entitled servers, snapshots the server when anything changed.
    Returns 0; no-op when nothing changed.
    """
    log_debug(3, sysid, "Errata cache to run:", schedule,
              "Changed:", self.__changed, "%d total packages" % len(self.__p))

    if not self.__changed:
        return 0

    commits = 0

    # get rid of the deleted packages
    dlist = [a for a in list(self.__p.values())
             if a.real and a.status in (DELETED, UPDATED)]
    if dlist:
        log_debug(4, sysid, len(dlist), "deleted packages")
        h = rhnSQL.prepare("""
            delete from rhnServerPackage
            where server_id = :sysid
            and name_id = :name_id
            and evr_id = :evr_id
            and ((:package_arch_id is null and package_arch_id is null)
                or package_arch_id = :package_arch_id)
        """)
        h.execute_bulk({
            'sysid': [sysid] * len(dlist),
            'name_id': [a.name_id for a in dlist],
            'evr_id': [a.evr_id for a in dlist],
            'package_arch_id': [a.package_arch_id for a in dlist],
        })
        commits = commits + len(dlist)
    del dlist

    # And now add packages
    alist = [a for a in list(self.__p.values()) if a.status in (ADDED, UPDATED)]

    if alist:
        log_debug(4, sysid, len(alist), "added packages")
        h = rhnSQL.prepare("""
            insert into rhnServerPackage
            (server_id, name_id, evr_id, package_arch_id, installtime)
            values (:sysid, LOOKUP_PACKAGE_NAME(:n), LOOKUP_EVR(:e, :v, :r),
                LOOKUP_PACKAGE_ARCH(:a), TO_TIMESTAMP(:instime, 'YYYY-MM-DD HH24:MI:SS')
            )
        """)

        # some fields are not allowed to contain empty string (varchar)
        def lambdaae(a):
            # Empty epoch must be stored as NULL, not ''.
            if a.e == '':
                return None
            else:
                return a.e

        package_data = {
            'sysid': [sysid] * len(alist),
            'n': [a.n for a in alist],
            'v': [a.v for a in alist],
            'r': [a.r for a in alist],
            'e': list(map(lambdaae, alist)),
            'a': [a.a for a in alist],
            'instime': [self.__expand_installtime(a.installtime) for a in alist],
        }
        try:
            h.execute_bulk(package_data)
            rhnSQL.commit()
        except rhnSQL.SQLSchemaError:
            e = sys.exc_info()[1]
            # LOOKUP_PACKAGE_ARCH failed
            if e.errno == 20243:
                log_debug(2, "Unknown package arch found", e)
                raise_with_tb(rhnFault(45, "Unknown package arch found"),
                              sys.exc_info()[2])
            # NOTE(review): other SQLSchemaError codes are silently swallowed
            # here (no re-raise) — confirm this is intentional.
        commits = commits + len(alist)
        del alist

    if schedule:
        # queue this server for an errata update
        update_errata_cache(sysid)

    # if provisioning box, and there was an actual delta, snapshot
    ents = check_entitlement(sysid)
    if commits and "enterprise_entitled" in ents:
        snapshot_server(sysid, "Package profile changed")

    # Our new state does not reflect what's on the database anymore
    self.__loaded = 0
    self.__changed = 0
    return 0
def upload(server_id, action_id, data=None):
    """Record the client's result of a configfiles upload action.

    Resets all files of the action to "not failed", then marks each file the
    client reported as failed (missing / too big / over quota) with the
    matching failure id.

    :param server_id: id of the reporting server
    :param action_id: id of the upload action
    :param data: dict mapping failure-reason keys to lists of file paths;
        defaults to ``None`` (was a mutable ``{}``; the argument is only
        read and ``if not data`` treats both the same way)
    """
    log_debug(3)
    # First, unmark any file as being failed
    h = rhnSQL.prepare(_query_reset_upload_files)
    h.execute(server_id=server_id, action_id=action_id)

    if not data:
        log_debug(4, "No data sent by client")
        return

    log_debug(6, 'data', data)
    failure_table = rhnSQL.Table('rhnConfigFileFailure', 'label')

    h = rhnSQL.prepare(_query_mark_upload_files)
    # We don't do execute_bulk here, since we want to know if each update has
    # actually touched a row
    reason_map = {'missing_files': 'missing',
                  'files_too_large': 'too_big',
                  'quota_failed': 'insufficient_quota',
                  }
    for reason in reason_map:
        log_debug(6, 'reason', reason)
        failed_files = data.get(reason)
        log_debug(6, 'failed_files', failed_files)
        if not failed_files:
            continue
        failure_id = failure_table[reason_map[reason]]['id']
        log_debug(6, 'failure_id', failure_id)
        for path in failed_files:
            log_debug(6, 'path', path)
            ret = h.execute(server_id=server_id, action_id=action_id,
                            failure_id=failure_id, path=path)
            if not ret:
                log_error("Could not find file %s for server %s, action %s" % (
                    path, server_id, action_id))
def save(self, server_id):
    """Synchronize this object's network interfaces with the database.

    Diffs the uploaded interfaces against the stored ones, then applies
    updates, inserts and deletes, including the per-interface IPv4/IPv6
    address records. Returns 0.
    """
    log_debug(4, self.ifaces)
    self.reload(server_id)
    log_debug(4, "Interfaces in DB", self.db_ifaces)

    # Compute updates, deletes and inserts
    inserts = []
    updates = []
    deletes = []

    # Work on a copy: matched entries are removed, leaving only inserts.
    ifaces = self.ifaces.copy()
    for iface in self.db_ifaces:
        name = iface['name']
        if name not in self.ifaces:
            # To be deleted
            deletes.append({'server_id': server_id, 'name': name})
            continue

        uploaded_iface = ifaces[name].copy()
        del ifaces[name]
        if _hash_eq(uploaded_iface, iface):
            # Same value
            continue
        uploaded_iface.update({'name': name, 'server_id': server_id})
        # Address objects are persisted separately below, not as columns.
        if 'ipv4' in uploaded_iface:
            del(uploaded_iface['ipv4'])
        if 'ipv6' in uploaded_iface:
            del(uploaded_iface['ipv6'])
        updates.append(uploaded_iface)

    # Everything else in self.ifaces has to be inserted
    for name, info in list(ifaces.items()):
        iface = {}
        iface['name'] = name
        iface['server_id'] = server_id
        iface['hw_addr'] = info['hw_addr']
        iface['module'] = info['module']
        inserts.append(iface)

    log_debug(4, "Deletes", deletes)
    log_debug(4, "Updates", updates)
    log_debug(4, "Inserts", inserts)

    self._update(updates)
    self._insert(inserts)

    # Persist the IPv4/IPv6 address records of every uploaded interface;
    # missing families get an empty address object so stale rows are cleared.
    ifaces = self.ifaces.copy()
    for name, info in list(ifaces.items()):
        if not 'ipv6' in info:
            info['ipv6'] = NetIfaceAddress6()
        info['ipv6'].save(self.get_server_id(server_id, name))
        if not 'ipv4' in info:
            info['ipv4'] = NetIfaceAddress4()
        info['ipv4'].save(self.get_server_id(server_id, name))

    # delete address (if any) of deleted interaces
    for d in deletes:
        interface = NetIfaceAddress6()
        interface.save(self.get_server_id(server_id, d['name']))
        interface = NetIfaceAddress4()
        interface.save(self.get_server_id(server_id, d['name']))
    self._delete(deletes)
    return 0
def _do_nothing(server_id, action_id):
    """No-op action handler: only logs the action id and its current status."""
    log_debug(4, action_id)
    status = rhnFlags.get('action_status')
    message = "Action ID: %s, Action Status: %s" % (str(action_id), str(status))
    log_debug(4, message)
def __reserve_user_db(user, password):
    """Authenticate an existing user or reserve a new username.

    Returns 1 when the credentials match an existing or reserved account,
    -1 on a bad password for an existing account, -2 on a bad password for
    a reserved login, 0 after reserving a brand-new login. Raises
    rhnFault(2001) when user creation is disallowed.
    """
    encrypted_password = CFG.encrypted_passwords
    log_debug(3, user, CFG.disallow_user_creation, encrypted_password,
              CFG.pam_auth_service)
    user = str(user)
    h = rhnSQL.prepare("""
        select w.id, w.password, w.org_id, ui.use_pam_authentication
        from web_contact w, rhnUserInfo ui
        where w.login_uc = upper(:p1)
        and w.id = ui.user_id
    """)
    h.execute(p1=user)
    data = h.fetchone_dict()
    if data and data["id"]:
        # contact exists, check password
        if data['use_pam_authentication'] == 'Y' and CFG.pam_auth_service:
            # We use PAM for authentication
            import rhnAuthPAM
            if rhnAuthPAM.check_password(user, password, CFG.pam_auth_service) > 0:
                return 1
            return -1
        if check_password(password, data['password']) > 0:
            return 1
        return -1

    # user doesn't exist. now we fail, instead of reserving user.
    if CFG.disallow_user_creation:
        raise rhnFault(2001)

    # May normalize/validate both values; raises on invalid input.
    user, password = check_user_password(user, password)

    # now check the reserved table
    h = rhnSQL.prepare("""
        select r.login, r.password from rhnUserReserved r
        where r.login_uc = upper(:p1)
    """)
    h.execute(p1=user)
    data = h.fetchone_dict()
    if data and data["login"]:
        # found already reserved
        if check_password(password, data["password"]) > 0:
            return 1
        return -2

    validate_new_username(user)
    log_debug(3, "calling validate_new_password")
    validate_new_password(password)

    # this is not reserved either, register it
    if encrypted_password:
        # Encrypt the password, let the function pick the salt
        password = encrypt_password(password)
    h = rhnSQL.prepare("""
        insert into rhnUserReserved (login, password)
        values (:username, :password)
    """)
    h.execute(username=user, password=password)
    rhnSQL.commit()

    # all should be dandy
    return 0
def __init__(self, hw=None, guest=None):
    """Register/refresh the (foreign) host system a virtual guest runs on
    and link the guest to it in rhnVirtualInstance.

    :param hw: dict of host hardware info; must carry an 'identifier'
        (digital server id) — assumed also 'name', 'os', 'type', 'arch',
        'total_ifls' (TODO confirm full schema against callers)
    :param guest: the registered guest server object
    """
    log_debug(4, hw, guest)
    if not hw or "identifier" not in hw or not guest:
        # incomplete data
        log_debug(1, "incomplete data")
        return
    # Look the host up by its digital server id.
    host = rhnSQL.Row("rhnServer", "digital_server_id")
    host.load(hw['identifier'])
    hid = host.get('id')
    guest.user = rhnUser.User("", "")
    guest.user.reload(guest.server['creator_id'])
    guestid = guest.getid()
    if not hid:
        # create a new host entry
        host = server_class.Server(guest.user, hw.get('arch'))
        host.server["name"] = hw.get('name')
        host.server["os"] = hw.get('os')
        host.server["release"] = hw.get('type')
        host.server["last_boot"] = time.time()
        host.default_description()
        host.virt_type = rhnVirtualization.VirtualizationType.FULLY
        host.virt_uuid = None
        fake_token = False
        if not rhnFlags.test("registration_token"):
            # we need to fake it so host.save() passes token checks
            rhnFlags.set("registration_token", 'fake')
            fake_token = True
        host.save(1, None)
        host.server["digital_server_id"] = hw['identifier']
        entitle_server = rhnSQL.Procedure("rhn_entitlements.entitle_server")
        entitle_server(host.getid(), 'foreign_entitled')
        host.save(1, None)
        if fake_token:
            # Remove the fake token again.
            rhnFlags.set("registration_token", None)

        hid = host.getid()
        host.reload(hid)
        log_debug(4, "New host created: ", host)
    else:
        # Host already known: just check it in.
        host = server_class.Server(None)
        host.reload(hid)
        host.checkin(commit=0)
        log_debug(4, "Found host: ", host)
    host.reload_hardware()
    hostcpu = host.hardware_by_class(CPUDevice)
    if hostcpu and len(hostcpu) > 0:
        hostcpu = hostcpu[0].data
    else:
        hostcpu = None
    if not hostcpu or str(hostcpu.get('nrsocket')) != hw.get('total_ifls'):
        # update only if the number has changed
        log_debug(1, "update host cpu:", hw.get('total_ifls'))
        cpu = {'class': 'CPU',
               'desc': 'Processor',
               'count': hw.get('total_ifls'),
               'model_ver': '',
               'speed': '0',
               'cache': '',
               'model_number': '',
               'bogomips': '',
               'socket_count': hw.get('total_ifls'),
               'platform': hw.get('arch'),
               'other': '',
               'model_rev': '',
               'model': hw.get('arch'),
               'type': hw.get('type')}
        host.delete_hardware()
        host.add_hardware(cpu)
        host.save_hardware()

    # Link guest -> host in rhnVirtualInstance (insert or re-point).
    h = rhnSQL.prepare("""
        select host_system_id
        from rhnVirtualInstance
        where virtual_system_id = :guestid""")
    h.execute(guestid=guestid)
    row = h.fetchone_dict()
    if not row or not row['host_system_id']:
        self._insert_virtual_instance(hid, None, fakeuuid=False)
        self._insert_virtual_instance(hid, guestid, fakeuuid=True)
    elif row['host_system_id'] != hid:
        # Guest moved to a different host.
        log_debug(4, "update_virtual_instance", hid, guestid)
        q_update = rhnSQL.prepare("""
            UPDATE rhnVirtualInstance
            SET host_system_id = :host_id
            WHERE virtual_system_id = :guest_id
            AND host_system_id = :old_host_id
        """)
        q_update.execute(host_id=hid, guest_id=guestid,
                         old_host_id=row['host_system_id'])
def _templ_srpms(): "Returns a template for querying srpms" log_debug(4, "Generating template for querying srpms") return """\
def add_hardware(self, hardware):
    """Add a new hardware device to this server's in-memory profile.

    :param hardware: dict-like mapping with at least a "class" key
    :return: 0 on success, -1 on missing input or missing/empty class,
        None on an unknown class (after mailing a traceback)
    :raises TypeError: if `hardware` is not a dict-like mapping

    Improvements over the previous version: `isinstance` instead of the
    exact `type(x) == type({})` comparison, and the long if/elif chain is
    replaced by lookup tables mapping the class string to the device class.
    """
    log_debug(4, hardware)
    if not hardware:
        return -1
    if isinstance(hardware, dict):
        hardware = UserDictCase(hardware)
    if not isinstance(hardware, UserDictCase):
        log_error("argument type is not hash: %s" % hardware)
        raise TypeError("This function requires a hash as an argument")
    # validation is important
    hw_class = hardware.get("class")
    if hw_class is None:
        return -1
    hw_class = hw_class.lower()

    # All "generic" device classes are persisted by HardwareDevice.
    generic_classes = ("video", "audio", "audio_hd", "usb", "other", "hd",
                       "floppy", "mouse", "modem", "network", "cdrom", "scsi",
                       "unspec", "scanner", "tape", "capture", "raid",
                       "socket", "keyboard", "printer", "firewire", "ide")
    # Classes with a dedicated device implementation.
    special_classes = {
        "cpu": CPUDevice,
        "memory": MemoryInformation,
        "dmi": DMIInformation,
        "installinfo": InstallInformation,
        "netinterfaces": NetIfaceInformation,
        "fqdn": FQDNInformation,
    }

    if hw_class in generic_classes:
        class_type = HardwareDevice
    elif hw_class in special_classes:
        class_type = special_classes[hw_class]
    elif hw_class == "sysinfo":
        # special case: we got info about a virtual host
        # where this system is running on
        SystemInformation(hardware, self)
        return 0
    elif hw_class == "machineinfo":
        MachineInformation(self.server["id"], self.server["name"], hardware)
        return 0
    else:
        log_error("UNKNOWN CLASS TYPE `%s'" % hw_class)
        # Same trick: try-except and raise the exception so that Traceback
        # can send the e-mail
        try:
            raise KeyError("Unknown class type `%s' for hardware '%s'" % (
                hw_class, hardware))
        except:
            Traceback(mail=1)
        return

    # create the new device and file it under its class bucket
    new_dev = class_type(hardware)
    if class_type in self.__hardware:
        _l = self.__hardware[class_type]
    else:
        _l = self.__hardware[class_type] = []
    _l.append(new_dev)
    self.__changed = 1
    return 0
def timer(last):
    """Log the wall-clock time elapsed since `last` (a time.time() value).

    Does nothing when `last` is falsy. Always returns 0.
    """
    if not last:
        return 0
    elapsed = time.time() - last
    log_debug(2, "%.2f sec" % (elapsed, ))
    return 0
def set_info(self, name, value):
    """ set a certain value for the userinfo field. This is BUTT ugly.

    Routes a client-supplied (name, value) pair into the right column of
    the personal-info and/or site-info records, translating legacy field
    names and sanitizing values. Returns 0 (also when the name is unknown,
    after logging an error) or -1 on empty name / disallowed empty value.
    """
    log_debug(3, name, value)
    # translation from what the client send us to real names of the fields
    # in the tables.
    mapping = {
        "first_name": "first_names",
        "position": "title",
        "title": "prefix"
    }

    if not name:
        return -1
    name = name.lower()
    if type(value) == type(""):
        value = value.strip()

    # We have to watch over carefully for different field names
    # being sent from rhn_register
    changed = 0

    # translation
    if name in mapping.keys():
        name = mapping[name]

    # Some fields can not have null string values
    if name in ["first_names", "last_name", "prefix",  # personal_info
                "address1", "city", "country"]:  # site_info
        # we require something of it
        if len(str(value)) == 0:
            return -1

    # fields in personal_info (and some in site)
    # NOTE: a name like "email" or "last_name" may match here AND in the
    # site-info chain below - both records get updated on purpose.
    if name in ["last_name", "first_names", "company",
                "phone", "fax", "email", "title"]:
        self.info[name] = value[:128]
        changed = 1
    elif name == "prefix":
        values = ["Mr.", "Mrs.", "Ms.", "Dr.", "Hr.", "Sr.", " "]
        # Now populate a dictinary of valid values
        valids = UserDictCase()
        for v in values:
            # initialize from good values, with and w/o the dot
            valids[v] = v
            valids[v[:-1]] = v
        # commonly encountered values
        valids["Miss"] = "Miss"
        valids["Herr"] = "Hr."
        valids["Sig."] = "Sr."
        valids["Sir"] = "Mr."
        # Now check it out
        if valids.has_key(value):
            self.info["prefix"] = valids[value]
            changed = 1
        else:
            # Unknown prefixes are coerced rather than rejected.
            log_error("Unknown prefix value `%s'. Assumed `Mr.' instead"
                      % value)
            self.info["prefix"] = "Mr."
            changed = 1

    # fields in site
    if name in ["phone", "fax", "zip"]:
        self.site[name] = value[:32]
        changed = 1
    elif name in ["city", "country", "alt_first_names", "alt_last_name",
                  "address1", "address2", "email",
                  "last_name", "first_names"]:
        if name == "last_name":
            self.site["alt_last_name"] = value
            changed = 1
        elif name == "first_names":
            self.site["alt_first_names"] = value
            changed = 1
        else:
            self.site[name] = value[:128]
            changed = 1
    elif name in ["state"]:  # stupid people put stupid things in here too
        self.site[name] = value[:60]
        changed = 1

    if not changed:
        log_error("SET_INFO: Unknown info `%s' = `%s'" % (name, value))
    return 0
def add_tools_channel(server_id, action_id, dry_run=0):
    """Subscribe the server to the tools channel; skip the subscription
    (log only) when dry_run is set."""
    if dry_run:
        log_debug(4, "dry run requested")
    else:
        subscribe_to_tools_channel(server_id)
    # ShadowAction tells the scheduler this action completed server-side.
    raise ShadowAction("Subscribed server to tools channel.")
def delete_channels(channelLabels, force=0, justdb=0, skip_packages=0, skip_channels=0,
                    skip_kickstart_trees=0, just_kickstart_trees=0):
    """Delete the given channels: their packages, kickstart trees, repo
    metadata and DB rows, honoring the skip_*/justdb/just_kickstart_trees
    switches. Refuses channels that belong to a Content Lifecycle Project.
    """
    # Get the package ids
    if not channelLabels:
        return
    rpms_ids = list_packages(channelLabels, force=force, sources=0)
    rpms_paths = _get_package_paths(rpms_ids, sources=0)
    srpms_ids = list_packages(channelLabels, force=force, sources=1)
    srpms_paths = _get_package_paths(srpms_ids, sources=1)

    if not skip_packages and not just_kickstart_trees:
        _delete_srpms(srpms_ids)
        _delete_rpms(rpms_ids)

    if not skip_kickstart_trees and not justdb:
        _delete_ks_files(channelLabels)

    if not justdb and not skip_packages and not just_kickstart_trees:
        _delete_files(rpms_paths + srpms_paths)

    # Get the channel ids
    h = rhnSQL.prepare("""
        select id, parent_channel
        from rhnChannel
        where label = :label
        order by parent_channel""")
    channel_ids = []
    for label in channelLabels:
        h.execute(label=label)
        row = h.fetchone_dict()
        if not row:
            # NOTE(review): `break` abandons all remaining labels when one is
            # missing - confirm whether `continue` was intended here.
            break
        channel_id = row['id']
        if row['parent_channel']:
            # Subchannel, we have to remove it first
            channel_ids.insert(0, channel_id)
        else:
            channel_ids.append(channel_id)
    if not channel_ids:
        return

    # Channels managed by a Content Lifecycle Project must not be removed
    # from here.
    clp = rhnSQL.prepare("""
        select id from susecontentenvironmenttarget
        where channel_id = :cid
    """)
    for cid in channel_ids:
        clp.execute(cid=cid)
        row = clp.fetchone()
        if row:
            print(
                "Channel belongs to a Content Lifecycle Project. Please use the web UI or API."
            )
            return

    # Tables referencing the channel indirectly via another table's id.
    indirect_tables = [
        ['rhnKickstartableTree', 'channel_id', 'rhnKSTreeFile', 'kstree_id'],
    ]
    query = """
        delete from %(table_2)s where %(link_field)s in (
            select id from %(table_1)s where %(channel_field)s = :channel_id
        )
    """
    for e in indirect_tables:
        args = {
            'table_1': e[0],
            'channel_field': e[1],
            'table_2': e[2],
            'link_field': e[3],
        }
        h = rhnSQL.prepare(query % args)
        h.executemany(channel_id=channel_ids)

    # Tables referencing the channel directly, in dependency order.
    tables = [
        ['rhnErrataFileChannel', 'channel_id'],
        ['rhnErrataNotificationQueue', 'channel_id'],
        ['rhnChannelErrata', 'channel_id'],
        ['rhnChannelPackage', 'channel_id'],
        ['rhnRegTokenChannels', 'channel_id'],
        ['rhnServerProfile', 'base_channel'],
        ['rhnKickstartableTree', 'channel_id'],
    ]

    if not skip_channels:
        tables.extend([
            ['suseProductChannel', 'channel_id'],
            ['rhnChannelFamilyMembers', 'channel_id'],
            ['rhnDistChannelMap', 'channel_id'],
            ['rhnReleaseChannelMap', 'channel_id'],
            ['rhnChannel', 'id'],
        ])

    if just_kickstart_trees:
        tables = [['rhnKickstartableTree', 'channel_id']]

    query = "delete from %s where %s = :channel_id"
    for table, field in tables:
        log_debug(3, "Processing table %s" % table)
        h = rhnSQL.prepare(query % (table, field))
        h.executemany(channel_id=channel_ids)

    if not justdb and not just_kickstart_trees:
        __deleteRepoData(channelLabels)
def __init__(self, hostname):
    """Create a ProxyAuth object for the given proxy hostname and load the
    cached systemid.

    :param hostname: hostname of this proxy as seen by the parent server
    """
    log_debug(3)
    # NOTE: assigned on the class, not the instance - all ProxyAuth objects
    # share the most recently supplied hostname.
    ProxyAuth.hostname = hostname
    self.__processSystemid()
def handler(self, req):
    """ main Apache handler

    Pipeline: base session handling, template-string overrides, secret-key
    sanity check, proxy authentication, client (GET) authentication, then
    request processing with a DB rollback to avoid lingering locks.
    """
    log_debug(2)
    ret = apacheSession.handler(self, req)
    if ret != apache.OK:
        return ret

    if not CFG.SEND_MESSAGE_TO_ALL:
        # Need to get any string template overrides here, before any app
        # code gets executed, as the rhnFault error messages use the
        # templates
        # If send_message_to_all, we don't have DB connectivity though
        h = rhnSQL.prepare("select label, value from rhnTemplateString")
        h.execute()

        templateStrings = {}
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            templateStrings[row['label']] = row['value']

        if templateStrings:
            rhnFlags.set('templateOverrides', templateStrings)

        log_debug(4, "template strings: %s" % templateStrings)

    if not CFG.SECRET_KEY:
        # Secret key not defined, complain loudly
        # raise-and-catch so Traceback can mail the error with context
        try:
            raise rhnException("Secret key not found!")
        except:
            rhnTB.Traceback(mail=1, req=req, severity="schema")
            req.status = 500
            req.send_http_header()
            return apache.OK

    # Try to authenticate the proxy if it this request passed
    # through a proxy.
    if self.proxyVersion:
        try:
            ret = self._req_processor.auth_proxy()
        except rhnFault:
            f = sys.exc_info()[1]
            return self._req_processor.response(f.getxml())

    # Decide what to do with the request: try to authenticate the client.
    # NOTE: only upon GET requests is there Signature information to
    #       authenticate. XMLRPC requests DO NOT use signature
    #       authentication.
    if req.method == "GET":
        try:
            ret = self._req_processor.auth_client()
        except rhnFault:
            f = sys.exc_info()[1]
            return self._req_processor.response(f.getxml())
        # be safe rather than sorry
        if not ret:
            log_error("Got a GET call, but auth_client declined",
                      req.path_info)
            return apache.HTTP_METHOD_NOT_ALLOWED

    # Avoid leaving Oracle deadlocks
    try:
        ret = self._req_processor.process()
        rhnSQL.rollback()
    except Exception as exc:
        # With SEND_MESSAGE_TO_ALL there is no DB connection to roll back.
        if not CFG.SEND_MESSAGE_TO_ALL:
            rhnSQL.rollback()
        raise
    log_debug(4, "Leave with return value", ret)
    return ret
def login(self): """ Login and fetch new token (proxy token). How it works in a nutshell. Only the broker component uses this. We perform a xmlrpc request to rhn_parent. This occurs outside of the http process we are currently working on. So, we do this all on our own; do all of our own SSL decisionmaking etc. We use CFG.RHN_PARENT as we always bypass the SSL redirect. DESIGN NOTES: what is the proxy auth token? ------------------------------------------- An Spacewalk Proxy auth token is a token fetched upon login from Red Hat Satellite or hosted. It has this format: 'S:U:ST:EO:SIG' Where: S = server ID U = username ST = server time EO = expiration offset SIG = signature H = hostname (important later) Within this function within the Spacewalk Proxy Broker we also tag on the hostname to the end of the token. The token as described above is enough for authentication purposes, but we need a to identify the exact hostname (as the Spacewalk Proxy sees it). So now the token becomes (token:hostname): 'S:U:ST:EO:SIG:H' DESIGN NOTES: what is X-RHN-Proxy-Auth? ------------------------------------------- This is where we use the auth token beyond Spacewalk Proxy login purposes. This a header used to track request routes through a hierarchy of RHN Proxies. X-RHN-Proxy-Auth is a header that passes proxy authentication information around in the form of an ordered list of tokens. This list is used to gain information as to how a client request is routed throughout an RHN topology. Format: 'S1:U1:ST1:EO1:SIG1:H1,S2:U2:ST2:EO2:SIG2:H2,...' |_________1_________| |_________2_________| |__... token token where token is really: token:hostname leftmost token was the first token hit by a client request. rightmost token was the last token hit by a client request. """ # pylint: disable=R0915 log_debug(3) server = self.__getXmlrpcServer() error = None token = None # update the systemid/serverid if need be. 
self.__processSystemid() # Makes three attempts to login for _i in range(self.__nRetries): try: token = server.proxy.login(self.__systemid) except (socket.error, socket.sslerror), e: if CFG.HTTP_PROXY: # socket error, check to see if your HTTP proxy is running... s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) httpProxy, httpProxyPort = CFG.HTTP_PROXY.split(':') try: s.connect((httpProxy, int(httpProxyPort))) except socket.error, e: error = [ 'socket.error', 'HTTP Proxy not running? ' '(%s) %s' % (CFG.HTTP_PROXY, e) ] # rather big problem: http proxy not running. log_error("*** ERROR ***: %s" % error[1]) Traceback(mail=0) except socket.sslerror, e: error = [ 'socket.sslerror', '(%s) %s' % (CFG.HTTP_PROXY, e) ] # rather big problem: http proxy not running. log_error("*** ERROR ***: %s" % error[1]) Traceback(mail=0) else: error = ['socket', str(e)] log_error(error) Traceback(mail=0)
def use_satellite(serverId, actionId, dry_run=0):
    """No-op handler for the 'use_satellite' action: logs and returns None.

    :param serverId: id of the server the action applies to (unused)
    :param actionId: id of the action being processed (unused)
    :param dry_run: ignored; kept for handler-signature compatibility
    """
    log_debug(3)
    return None