def test_execute_rowcount(self):
    """Verify that cursor.execute() returns the number of affected rows.

    Inserts two rows into a scratch table and checks each insert reports
    exactly one affected row, then checks that a blanket delete reports
    two.  The scratch table is always dropped afterwards.
    """
    table_name = "misatest"
    try:
        tables = self._list_tables()
        # Create the scratch table on first use, otherwise just empty it
        # so previous runs cannot skew the row counts.
        if table_name not in tables:
            rhnSQL.execute("create table %s (id int, value int)" % table_name)
        else:
            rhnSQL.execute("delete from %s" % table_name)
        insert_statement = rhnSQL.Statement(
            "insert into %s values (:item_id, :value)" % table_name)
        h = rhnSQL.prepare(insert_statement)
        ret = h.execute(item_id=1, value=2)
        self.assertEqual(ret, 1)
        ret = h.execute(item_id=2, value=2)
        self.assertEqual(ret, 1)
        delete_statement = rhnSQL.Statement("delete from %s" % table_name)
        h = rhnSQL.prepare(delete_statement)
        ret = h.execute()
        # Both rows inserted above must have been removed.
        self.assertEqual(ret, 2)
        rhnSQL.commit()
    finally:
        # Always drop the scratch table so reruns start from a clean slate.
        rhnSQL.execute("drop table %s" % table_name)
class KickstartableTreesDumper(CachedDumper, exportLib.KickstartableTreesDumper):
    # Exports one kickstartable tree (selected by label, org-less trees
    # only) together with its channel label, tree type and install type.
    # CachedDumper caches the generated XML under the key from _get_key().
    iterator_query = rhnSQL.Statement("""
        select kt.id,
               c.label channel,
               kt.base_path "base-path",
               kt.label,
               kt.boot_image "boot-image",
               ktt.name "kstree-type-name",
               ktt.label "kstree-type-label",
               kit.name "install-type-name",
               kit.label "install-type-label",
               TO_CHAR(kt.last_modified, 'YYYYMMDDHH24MISS') "last-modified"
          from rhnKickstartableTree kt,
               rhnKSTreeType ktt,
               rhnKSInstallType kit,
               rhnChannel c
         where kt.channel_id = c.id
           and ktt.id = kt.kstree_type
           and kit.id = kt.install_type
           and kt.org_id is NULL
           and kt.label = :kickstart_label
    """)

    def _get_key(self, params):
        """Return the cache file key for the kickstart label in params."""
        kickstart_label = params['kickstart_label']
        return "xml-kickstartable-tree/%s.xml" % kickstart_label
class ErrataDumper(exportLib.ErrataDumper):
    # Exports one erratum's full metadata; date columns are rendered as
    # YYYYMMDDHH24MISS strings for the XML stream.
    iterator_query = rhnSQL.Statement("""
        select e.id,
               e.org_id,
               e.advisory_name,
               e.advisory,
               e.advisory_type,
               e.advisory_rel,
               e.product,
               e.description,
               e.synopsis,
               e.topic,
               e.solution,
               TO_CHAR(e.issue_date, 'YYYYMMDDHH24MISS') issue_date,
               TO_CHAR(e.update_date, 'YYYYMMDDHH24MISS') update_date,
               TO_CHAR(e.last_modified, 'YYYYMMDDHH24MISS') last_modified,
               e.refers_to,
               e.notes,
               e.errata_from
          from rhnErrata e
         where e.id = :errata_id
    """)

    def __init__(self, writer, params):
        # Wrap the prepared statement in a QueryIterator so the parent
        # dumper can stream one erratum per parameter set in `params`.
        statement = rhnSQL.prepare(self.iterator_query)
        iterator = QueryIterator(statement, params)
        exportLib.ErrataDumper.__init__(self, writer, iterator)
class SourcePackagesDumper(CachedDumper, exportLib.SourcePackagesDumper):
    # Exports source-package (SRPM) metadata; joins rhnChecksumView twice
    # to pick up both the payload checksum (c) and the signature checksum
    # (sig) for the same package row.
    iterator_query = rhnSQL.Statement("""
        select ps.id,
               sr.name source_rpm,
               pg.name package_group,
               ps.rpm_version,
               ps.payload_size,
               ps.build_host,
               TO_CHAR(ps.build_time, 'YYYYMMDDHH24MISS') build_time,
               sig.checksum sigchecksum,
               sig.checksum_type sigchecksum_type,
               ps.vendor,
               ps.cookie,
               ps.package_size,
               c.checksum_type,
               c.checksum,
               TO_CHAR(ps.last_modified, 'YYYYMMDDHH24MISS') last_modified
          from rhnPackageSource ps,
               rhnPackageGroup pg,
               rhnSourceRPM sr,
               rhnChecksumView c,
               rhnChecksumView sig
         where ps.id = :package_id
           and ps.package_group = pg.id
           and ps.source_rpm_id = sr.id
           and ps.checksum_id = c.id
           and ps.sigchecksum_id = sig.id
    """)
    # Cache-key knobs consumed by CachedDumper:
    item_id_key = 'package_id'  # query param that identifies the item
    hash_factor = 2             # spreads cache files over subdirectories
    key_template = 'xml-packages/%s/rhn-source-package-%s.xml'
class ShortPackagesDumper(CachedDumper, exportLib.ShortPackagesDumper):
    # Exports the abbreviated ("short") package record: NEVRA, checksum
    # and size only.  (pe.evr).version etc. unpacks the composite EVR
    # column of rhnPackageEVR.
    iterator_query = rhnSQL.Statement("""
        select p.id,
               p.org_id,
               pn.name,
               (pe.evr).version as version,
               (pe.evr).release as release,
               (pe.evr).epoch as epoch,
               pa.label as package_arch,
               c.checksum_type,
               c.checksum,
               p.package_size,
               TO_CHAR(p.last_modified, 'YYYYMMDDHH24MISS') as last_modified
          from rhnPackage p,
               rhnPackageName pn,
               rhnPackageEVR pe,
               rhnPackageArch pa,
               rhnChecksumView c
         where p.id = :package_id
           and p.name_id = pn.id
           and p.evr_id = pe.id
           and p.package_arch_id = pa.id
           and p.checksum_id = c.id
    """)
    # Cache-key knobs consumed by CachedDumper:
    item_id_key = 'package_id'  # query param that identifies the item
    hash_factor = 2             # spreads cache files over subdirectories
    key_template = 'xml-short-packages/%s/rhn-package-short-%s.xml'
def _get_package_field_ids(self):
    """Resolve and cache this package's name/EVR/arch database ids.

    Looks up the rhnPackage row for self.get_id() and stores the
    name_id, evr_id and package_arch_id columns on self.name_id,
    self.evr_id and self.arch_id respectively.

    Raises PackageNotFound when the package has no id or no matching
    rhnPackage row exists.
    """
    pkg_id = self.get_id()
    if not pkg_id:
        raise PackageNotFound("ID for package %s was not found." % self.get_name())

    _package_info_query = rhnSQL.Statement("""
        select p.name_id name_id,
               p.evr_id evr_id,
               p.package_arch_id arch_id
          from rhnPackage p
         where p.id = :package_id
    """)
    cursor = rhnSQL.prepare(_package_info_query)
    cursor.execute(package_id=pkg_id)

    row = cursor.fetchone_dict()
    if not row:
        raise PackageNotFound("Name, EVR, and Arch info not found for %s" % self.get_name())

    # Cache the resolved ids on the instance.
    self.name_id = row['name_id']
    self.evr_id = row['evr_id']
    self.arch_id = row['arch_id']
def _add_package_to_install_action(self, action_id):
    """Attach self.package to an action as an 'install' entry.

    Inserts one rhnActionPackage row (id drawn from rhn_act_p_id_seq)
    tying the package's name/EVR/arch ids to the given action id.
    """
    insert_package_query = rhnSQL.Statement("""
        insert into rhnActionPackage(id, action_id, parameter,
                                     name_id, evr_id, package_arch_id)
        values (sequence_nextval('rhn_act_p_id_seq'), :action_id, 'install',
                :name_id, :evr_id, :package_arch_id)
    """)
    cursor = rhnSQL.prepare(insert_package_query)
    # All bind values are passed as strings, matching the original call.
    cursor.execute(action_id=str(action_id),
                   name_id=str(self.package.get_name_id()),
                   evr_id=str(self.package.get_evr_id()),
                   package_arch_id=str(self.package.get_arch_id()))
class ChannelsDumper(exportLib.ChannelsDumper):
    # Single-channel variant: dumps the channel whose label is the first
    # entry of self._channels, with its arch and parent-channel label.
    _query_list_channels = rhnSQL.Statement("""
        select c.id,
               c.label,
               ca.label channel_arch,
               c.basedir,
               c.name,
               c.summary,
               c.description,
               TO_CHAR(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
               pc.label parent_channel
          from rhnChannel c left outer join rhnChannel pc
                   on c.parent_channel = pc.id,
               rhnChannelArch ca
         where c.label = :channel
           and c.channel_arch_id = ca.id
    """)

    def set_iterator(self):
        """Return a cursor positioned over the first requested channel."""
        h = rhnSQL.prepare(self._query_list_channels)
        h.execute(channel=self._channels[0])
        return h
def _get_ks_virt_type(type_id):
    """Map a kickstart virtualization type id to its label.

    Returns the label from rhnKickstartVirtualizationType, or 'auto'
    when no row matches the given id.
    """
    _query_kickstart_virt_type = rhnSQL.Statement("""
        select label
          from rhnKickstartVirtualizationType kvt
         where kvt.id = :id
    """)
    cursor = rhnSQL.prepare(_query_kickstart_virt_type)
    cursor.execute(id=type_id)
    row = cursor.fetchone_dict()

    # XXX: we should have better constraints on the db so this doesn't happen.
    kstype = row['label'] if row else 'auto'

    log_debug(1, "KS_TYPE: %s" % kstype)
    return kstype
class PackagesDumper(CachedDumper, exportLib.PackagesDumper):
    # Exports the full package record (NEVRA, group, sizes, build info,
    # header offsets, checksum...).  (pe.evr).version etc. unpacks the
    # composite EVR column of rhnPackageEVR.
    iterator_query = rhnSQL.Statement("""
        select p.id,
               p.org_id,
               pn.name,
               (pe.evr).version as version,
               (pe.evr).release as release,
               (pe.evr).epoch as epoch,
               pa.label as package_arch,
               pg.name as package_group,
               p.rpm_version,
               p.description,
               p.summary,
               p.package_size,
               p.payload_size,
               p.installed_size,
               p.build_host,
               TO_CHAR(p.build_time, 'YYYYMMDDHH24MISS') as build_time,
               sr.name as source_rpm,
               c.checksum_type,
               c.checksum,
               p.vendor,
               p.payload_format,
               p.compat,
               p.header_sig,
               p.header_start,
               p.header_end,
               p.copyright,
               p.cookie,
               TO_CHAR(p.last_modified, 'YYYYMMDDHH24MISS') as last_modified
          from rhnPackage p,
               rhnPackageName pn,
               rhnPackageEVR pe,
               rhnPackageArch pa,
               rhnPackageGroup pg,
               rhnSourceRPM sr,
               rhnChecksumView c
         where p.id = :package_id
           and p.name_id = pn.id
           and p.evr_id = pe.id
           and p.package_arch_id = pa.id
           and p.package_group = pg.id
           and p.source_rpm_id = sr.id
           and p.checksum_id = c.id
    """)
    # Cache-key knobs consumed by CachedDumper:
    item_id_key = 'package_id'  # query param that identifies the item
    hash_factor = 2             # spreads cache files over subdirectories
    key_template = 'xml-packages/%s/rhn-package-%s.xml'
class ChannelsDumper(exportLib.ChannelsDumper):
    # Multi-channel variant driven by channel ids, with optional date
    # filtering.  Each subelement is dumped through exportLib's private
    # _ChannelDumper so the date window can be passed along.
    _query_list_channels = rhnSQL.Statement("""
        select c.id,
               c.org_id,
               c.label,
               ca.label channel_arch,
               c.basedir,
               c.name,
               c.summary,
               c.description,
               c.gpg_key_url,
               ct.label checksum_type,
               TO_CHAR(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
               pc.label parent_channel,
               c.channel_access
          from rhnChannel c left outer join rhnChannel pc
                   on c.parent_channel = pc.id
               left outer join rhnChecksumType ct
                   on c.checksum_type_id = ct.id,
               rhnChannelArch ca
         where c.id = :channel_id
           and c.channel_arch_id = ca.id
    """)

    def __init__(self, writer, channels=(), start_date=None, end_date=None,
                 use_rhn_date=True, whole_errata=False):
        exportLib.ChannelsDumper.__init__(self, writer, channels)
        # Date-window settings forwarded to each _ChannelDumper below.
        self.start_date = start_date
        self.end_date = end_date
        self.use_rhn_date = use_rhn_date
        self.whole_errata = whole_errata

    def dump_subelement(self, data):
        """Dump one channel row through exportLib's _ChannelDumper."""
        log_debug(6, data)
        # return exportLib.ChannelsDumper.dump_subelement(self, data)
        # pylint: disable=W0212
        c = exportLib._ChannelDumper(self._writer, data, self.start_date,
                                     self.end_date, self.use_rhn_date,
                                     self.whole_errata)
        c.dump()

    def set_iterator(self):
        """Return a QueryIterator over the requested channel ids,
        or None when no channels were requested."""
        if not self._channels:
            # Nothing to do
            return
        h = rhnSQL.prepare(self._query_list_channels)
        return QueryIterator(statement=h, params=self._channels)
def update_client_states(self, clients):
    """Query and update the database for clients to be pinged"""
    if not clients:
        return
    # The subquery resolves each state label to its id inline;
    # executemany binds the two parallel lists element-by-element, one
    # UPDATE per client.
    h = rhnSQL.prepare(
        rhnSQL.Statement("""
            UPDATE rhnPushClient
               SET state_id = state.id
              FROM ( SELECT id FROM rhnPushClientState WHERE label = :state ) state
             WHERE name = :client_id
        """))
    h.executemany(client_id=[c['id'] for c in clients],
                  state=[c['state'] for c in clients])
    rhnSQL.commit()
class ChannelsDumperEx(CachedDumper, exportLib.ChannelsDumper):
    # Extended channel export: adds product info, GPG key URL, checksum
    # type and access flags on top of the basic channel record.  Cached
    # per channel id via _get_key().
    iterator_query = rhnSQL.Statement("""
        select c.id,
               c.label,
               ca.label channel_arch,
               c.basedir,
               c.name,
               c.summary,
               c.description,
               c.gpg_key_url,
               c.org_id,
               TO_CHAR(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
               c.channel_product_id,
               pc.label parent_channel,
               cp.product channel_product,
               cp.version channel_product_version,
               cp.beta channel_product_beta,
               c.receiving_updates,
               ct.label checksum_type,
               c.channel_access
          from rhnChannel c left outer join rhnChannel pc
                   on c.parent_channel = pc.id
               left outer join rhnChannelProduct cp
                   on c.channel_product_id = cp.id
               left outer join rhnChecksumType ct
                   on c.checksum_type_id = ct.id,
               rhnChannelArch ca
         where c.id = :channel_id
           and c.channel_arch_id = ca.id
    """)

    def _get_key(self, params):
        """Return the cache file key for the channel id in params."""
        channel_id = params['channel_id']
        return "xml-channels/rhn-channel-%d.xml" % channel_id
class ServerGroup:
    # Thin wrapper around one rhnServerGroup row.  Column access is
    # synthesized: get_<col>() / set_<col>(val) calls are routed through
    # __getattr__ to the generic _get/_set helpers below.

    def __init__(self):
        # Lazily created rhnSQL.Row; stays None until load() or the
        # first setter call.
        self._row_server_group = None

    _query_lookup = rhnSQL.Statement("""
        select id
          from rhnServerGroup
         where org_id = :org_id
           and name = :name
    """)

    def load(self, org_id, name):
        """Load the group identified by (org_id, name).

        org_id may also be a username (see _lookup_org_id).
        Raises InvalidServerGroupError when no such group exists.
        """
        org_id = self._lookup_org_id(org_id)
        h = rhnSQL.prepare(self._query_lookup)
        h.execute(org_id=org_id, name=name)
        row = h.fetchone_dict()
        if not row:
            raise InvalidServerGroupError(org_id, name)
        server_group_id = row['id']
        self._row_server_group = rhnSQL.Row("rhnServerGroup", 'id')
        self._row_server_group.load(server_group_id)

    # Setters
    def set_org_id(self, org_id):
        # Explicit setter so usernames get resolved to org ids first.
        self._set('org_id', self._lookup_org_id(org_id))

    def _set(self, name, val):
        # The first write allocates a fresh row keyed by a new sequence
        # id; later writes just update the in-memory row.
        if self._row_server_group is None:
            self._row_server_group = rhnSQL.Row('rhnServerGroup', 'id')
            server_group_id = rhnSQL.Sequence('rhn_server_group_id_seq').next()
            self._row_server_group.create(server_group_id)
        self._row_server_group[name] = val

    # Getters
    def _get(self, name):
        # Read a column of the loaded/created row.
        return self._row_server_group[name]

    def _lookup_org_id(self, org_id):
        """Resolve org_id to a numeric org id.

        A string argument is treated as a username and resolved via
        rhnUser; otherwise it must be a web_customer id.  Raises
        InvalidUserError / InvalidOrgError on lookup failure.
        """
        if isinstance(org_id, types.StringType):
            # Is it a user?
            u = rhnUser.search(org_id)
            if not u:
                raise InvalidUserError(org_id)
            return u.contact['org_id']
        t = rhnSQL.Table('web_customer', 'id')
        row = t[org_id]
        if not row:
            raise InvalidOrgError(org_id)
        return row['id']

    def save(self):
        # No-op when nothing was ever loaded or modified.
        if not self._row_server_group:
            return
        self._row_server_group.save()

    def __getattr__(self, name):
        # Route get_foo()/set_foo(v) to the generic helpers above.
        if name.startswith('get_'):
            return CallableObj(name[4:], self._get)
        if name.startswith('set_'):
            return CallableObj(name[4:], self._set)
        raise AttributeError(name)
def create_server_group(params): "Create a server group from a dictionary with the params" return apply(_create_server_group, (), params) def fetch_server_group(org_id, name): "Load a server group object from the org id and name" s = rhnServerGroup.ServerGroup() s.load(org_id, name) return s _query_fetch_server_groups = rhnSQL.Statement(""" select sgm.server_group_id from rhnServerGroupMembers sgm, rhnServerGroup sg where sgm.server_id = :server_id and sgm.server_group_id = sg.id and sg.group_type is null """) def fetch_server_groups(server_id): "Return a server's groups" h = rhnSQL.prepare(_query_fetch_server_groups) h.execute(server_id=server_id) groups = map(lambda x: x['server_group_id'], h.fetchall_dict() or []) groups.sort() return groups def build_server_group_params(**kwargs):
def __init__(self):
    # Singleton: every TestServer shares one TestServerImplementation.
    if TestServer.__instance is None:
        TestServer.__instance = TestServer.TestServerImplementation()
    # Store under the name-mangled key so normal attribute access works.
    self.__dict__['_TestServer__instance'] = TestServer.__instance

def __getattr__(self, attr):
    # Delegate all attribute reads to the shared implementation.
    return getattr(TestServer.__instance, attr)

def __setattr__(self, attr, value):
    # Delegate all attribute writes to the shared implementation.
    return setattr(TestServer.__instance, attr, value)

_query_action_lookup = rhnSQL.Statement("""
    select * from rhnServerAction
     where server_id = :server_id
""")

def look_at_actions(server_id):
    # Fetch every rhnServerAction row recorded for the server.
    h = rhnSQL.prepare(_query_action_lookup)
    h.execute(server_id=server_id)
    return h.fetchall_dict()

if __name__ == "__main__":
    myserver = TestServer()
    #myserver.upload_packages('/home/devel/wregglej/rpmtest')
    #handler = rhnHandler()
    #print handler.auth_system( myserver.getSystemId() )
PackageInstallScheduler, \ NoActionInfo, \ PackageNotFound from spacewalk.server.rhnChannel import subscribe_to_tools_channel __rhnexport__ = [ 'initiate', 'schedule_virt_guest_pkg_install', 'add_tools_channel' ] _query_initiate_guest = rhnSQL.Statement(""" select ksd.label as profile_name, akg.kickstart_host, kvt.label as virt_type, akg.mem_kb, akg.vcpus, akg.disk_path, akg.virt_bridge, akg.cobbler_system_name, akg.disk_gb, akg.append_string, akg.guest_name, akg.ks_session_id from rhnActionKickstartGuest akg, rhnKSData ksd, rhnKickstartSession ksess, rhnKickstartDefaults ksdef, rhnKickstartVirtualizationType kvt where akg.action_id = :action_id and ksess.kickstart_id = ksd.id and ksess.id = akg.ks_session_id and ksdef.kickstart_id = ksd.id and ksdef.virtualization_type = kvt.id """) def schedule_virt_guest_pkg_install(server_id, action_id, dry_run=0): """ ShadowAction that schedules a package installation action for the rhn-virtualization-guest package. """ log_debug(3)
# in this software or its documentation. # # import base64 from spacewalk.common.rhnLog import log_debug from spacewalk.server import rhnSQL # the "exposed" functions __rhnexport__ = ['run'] _query_clear_output = rhnSQL.Statement(""" delete from rhnServerActionScriptResult where server_id = :server_id and action_script_id = ( select id from rhnActionScript where action_id = :action_id ) """) _query_initial_store = rhnSQL.Statement(""" insert into rhnServerActionScriptResult ( server_id, action_script_id, output, start_date, stop_date, return_code ) values ( :server_id,
return ''.join(rc) class _dummyDefaultProfile: def getAttribute(self, name): if name == 'id': return 'None' elif name == 'title': return 'No profile selected. Using defaults.' return '' _query_clear_tresult = rhnSQL.Statement(""" delete from rhnXccdfTestresult where server_id = :server_id and action_scap_id = ( select id from rhnActionScap where action_id = :action_id) """) _query_insert_tresult = rhnSQL.Statement(""" insert into rhnXccdfTestresult( id, server_id, action_scap_id, benchmark_id, profile_id, identifier, start_time, end_time, errors)
for (cf_name, _org_id), (max_members, max_flex) in cfps.items(): if not max_members: max_members = 0 if not max_flex: max_flex = 0 if cf_max_tuples.has_key(cf_name): cf_max_members, cf_max_flex = cf_max_tuples[cf_name] cf_max_tuples[cf_name] = (cf_max_members + max_members, cf_max_flex + max_flex) else: cf_max_tuples[cf_name] = (max_members, max_flex) return cf_max_tuples _query_fetch_existing_channel_families = rhnSQL.Statement(""" select label from rhnChannelFamily cf """) def _fetch_existing_channel_families(): h = rhnSQL.prepare(_query_fetch_existing_channel_families) h.execute() cfs = {} while 1: row = h.fetchone_dict() if not row: break cfs[row['label']] = 1
class ConfigManagement(configFilesHandler.ConfigFilesHandler):
    # XML-RPC handler for configuration-channel management calls.

    def __init__(self):
        log_debug(3)
        configFilesHandler.ConfigFilesHandler.__init__(self)
        # Map externally visible XML-RPC names to local method names.
        self.functions.update({
            'management.get_file': 'management_get_file',
            'management.list_config_channels': 'management_list_channels',
            'management.create_config_channel': 'management_create_channel',
            'management.remove_config_channel': 'management_remove_channel',
            'management.list_file_revisions': 'management_list_file_revisions',
            'management.list_files': 'management_list_files',
            'management.has_file': 'management_has_file',
            'management.put_file': 'management_put_file',
            'management.remove_file': 'management_remove_file',
            'management.diff': 'management_diff',
            'management.get_default_delimiters': 'management_get_delimiters',
            'management.get_maximum_file_size': 'management_get_maximum_file_size',
        })
        self.user = None
        self.default_delimiter = '@'

    _query_list_config_channels = rhnSQL.Statement("""
        select cc.name,
               cc.label,
               cct.label channel_type
          from rhnConfigChannelType cct,
               rhnConfigChannel cc
         where cc.org_id = :org_id
           and cc.confchan_type_id = cct.id
           and cct.label = 'normal'
         order by cc.label, cc.name
    """)

    def _get_and_validate_session(self, dict):
        # Raises (via _validate_session) when the session token is bad.
        session = dict.get('session')
        self._validate_session(session)

    def management_list_channels(self, dict):
        """Return the labels of this org's 'normal' config channels."""
        log_debug(1)
        self._get_and_validate_session(dict)
        return map(lambda x: x['label'],
                   rhnSQL.fetchall_dict(self._query_list_config_channels,
                                        org_id=self.org_id) or [])

    _query_lookup_config_channel = rhnSQL.Statement("""
        select id
          from rhnConfigChannel
         where org_id = :org_id
           and label = :config_channel
    """)

    def management_create_channel(self, dict):
        """Create a config channel; fault 4010 if the label already exists."""
        log_debug(1)
        self._get_and_validate_session(dict)
        config_channel = dict.get('config_channel')
        # XXX Validate the namespace
        config_channel_name = dict.get('config_channel_name') or config_channel
        config_channel_description = dict.get('description') or config_channel
        row = rhnSQL.fetchone_dict(self._query_lookup_config_channel,
                                   org_id=self.org_id,
                                   config_channel=config_channel)
        if row:
            raise rhnFault(4010,
                           "Configuration channel %s already exists" % config_channel,
                           explain=0)
        insert_call = rhnSQL.Function('rhn_config.insert_channel',
                                      rhnSQL.types.NUMBER())
        config_channel_id = insert_call(self.org_id,
                                        'normal',
                                        config_channel_name,
                                        config_channel,
                                        config_channel_description)
        rhnSQL.commit()
        return {}

    _query_config_channel_by_label = rhnSQL.Statement("""
        select id
          from rhnConfigChannel
         where org_id = :org_id
           and label = :label
    """)

    def management_remove_channel(self, dict):
        """Delete a config channel.

        Faults: 4009 when the channel does not exist, 4005 when it is
        non-empty (detected via the DB child-record constraint).
        """
        log_debug(1)
        self._get_and_validate_session(dict)
        config_channel = dict.get('config_channel')
        # XXX Validate the namespace
        row = rhnSQL.fetchone_dict(self._query_config_channel_by_label,
                                   org_id=self.org_id, label=config_channel)
        if not row:
            raise rhnFault(4009, "Channel not found")
        delete_call = rhnSQL.Procedure('rhn_config.delete_channel')
        try:
            delete_call(row['id'])
        except rhnSQL.SQLError, e:
            errno = e.args[0]
            # Error 2292: integrity constraint - child records found,
            # i.e. the channel still contains files.
            if errno == 2292:
                raise rhnFault(4005,
                               "Cannot remove non-empty channel %s" % config_channel,
                               explain=0), None, sys.exc_info()[2]
            raise
        log_debug(5, "Removed:", config_channel)
        rhnSQL.commit()
        return ""
set status = :status, result_code = :result_code, result_msg = :result_message, completion_time = current_timestamp where action_id = :action_id and server_id = :server_id """) h.execute(action_id=action_id, server_id=server_id, status=status, result_code=result_code, result_message=result_message) _query_lookup_action_childs = rhnSQL.Statement(""" select a.id from rhnAction a join rhnServerAction sa on sa.action_id = a.id where prerequisite = :action_id and sa.server_id = :server_id """) _query_lookup_action = rhnSQL.Statement(""" select sa.action_id, sa.status from rhnServerAction sa where sa.server_id = :server_id and sa.action_id = :action_id """) def invalidate_action(server_id, action_id): log_debug(4, server_id, action_id) h = rhnSQL.prepare(_query_lookup_action)
class UpstreamServer(SocketServer.TCPServer):
    # TCP server whose incoming connections act only as a "tickle":
    # each request triggers a sweep that notifies jabber clients with
    # pending actions (see finish_request / notify_jabber_nodes).

    def __init__(self, server_address):
        # No request-handler class; requests carry no payload we use.
        SocketServer.TCPServer.__init__(self, server_address, None)
        self._next_poll_interval = None

    def get_server_port(self):
        return self.server_address[1]

    def get_next_poll_interval(self):
        # Seconds until the earliest future action found by the last
        # sweep, or None when nothing is scheduled ahead.
        return self._next_poll_interval

    def set_jabber_connection(self, jabber_connection):
        self.jabber_connection = jabber_connection

    def finish_request(self, request, client_address):
        log_debug(2, client_address)
        log_debug(2, "###about to notify jabber nodes from finish request")
        self.notify_jabber_nodes()

    def notify_jabber_nodes(self):
        """Send a check-in request to every reachable client that has a
        runnable pending action; record the shortest delay to any
        future-scheduled action in self._next_poll_interval."""
        log_debug(3)
        h = rhnSQL.prepare(self._query_get_pending_clients)
        h.execute()
        self._next_poll_interval = None
        while 1:
            row = h.fetchone_dict()
            if not row:
                # End of loop
                break
            delta = row['delta']
            if delta > 0:
                # Set the next poll interval to something large if it was not
                # previously set before; this way min() will pick up this
                # delta, but we don't have to special-case the first delta we
                # find
                npi = self._next_poll_interval or 86400
                self._next_poll_interval = min(delta, npi)
                log_debug(4, "Next poll interval", delta)
                continue
            jabber_id = row['jabber_id']
            if jabber_id is None:
                # Not even online
                continue
            server_id = row['server_id']
            if server_id and reboot_in_progress(server_id):
                # don't call when a reboot is in progress
                continue
            if not self.jabber_connection.jid_available(jabber_id):
                log_debug(4, "Node %s not available for notifications" % jabber_id)
                # iterate further, in case there are other clients that
                # CAN be notified.
                continue
            log_debug(4, "Notifying", jabber_id, row['server_id'])
            self.jabber_connection.send_message(
                jabber_id, jabber_lib.NS_RHN_MESSAGE_REQUEST_CHECKIN)
        rhnSQL.commit()

    # We need to drive this query by rhnPushClient since it's substantially
    # smaller than rhnAction
    _query_get_pending_clients = rhnSQL.Statement("""
        select a.id, sa.server_id, pc.jabber_id,
               date_diff_in_days(current_timestamp, earliest_action) * 86400 delta
          from rhnServerAction sa,
               rhnAction a,
               rhnPushClient pc
         where pc.server_id = sa.server_id
           and sa.action_id = a.id
           and sa.status in (0, 1) -- Queued or picked up
           and not exists (
               -- This is like saying 'this action has no
               -- prerequisite or has a prerequisite that has completed
               -- (status = 2)
               select 1
                 from rhnServerAction sap
                where sap.server_id = sa.server_id
                  and sap.action_id = a.prerequisite
                  and sap.status != 2
           )
         order by earliest_action
    """)
#rhn_channel.update_family_counts(channel_family_id_val, server_org_id_val) update_family_counts = rhnSQL.Procedure("rhn_channel.update_family_counts") for famid in channel_family_ids: # Update the channel family counts separately at the end here # instead of in the loop above. If you have an activation key # with lots of custom child channels you can end up repeatedly # updating the same channel family counts over and over and over # even thou you really only need todo it once. log_debug(5, "calling update fam counts: %s" % famid) update_family_counts(famid, server['org_id']) return ret _query_token_server_groups = rhnSQL.Statement(""" select rtg.server_group_id, sg.name from rhnRegTokenGroups rtg, rhnServerGroup sg where rtg.token_id = :token_id and sg.id = rtg.server_group_id """) def token_server_groups(server_id, tokens_obj): """ Handle server group subscriptions for the registration token """ assert(isinstance(tokens_obj, ActivationTokens)) h = rhnSQL.prepare(_query_token_server_groups) server_groups = {} for token in tokens_obj.tokens: token_id = token['token_id'] h.execute(token_id=token_id) while 1: row = h.fetchone_dict() if not row:
if self._tcp_server in rfds: # we were tickled self._tcp_server.handle_request() npi = self._tcp_server.get_next_poll_interval() or self._poll_interval if wfds: # Timeout log_debug(5,"Notifying jabber nodes") self._tcp_server.notify_jabber_nodes() else: log_debug(5,"Not notifying jabber nodes") _query_reap_pinged_clients = rhnSQL.Statement(""" update rhnPushClient set state_id = :offline_id where state_id = :online_id and last_ping_time is not null and current_timestamp > next_action_time """) def reap_pinged_clients(self): # Get the online and offline ids online_id = self._get_push_state_id('online') offline_id = self._get_push_state_id('offline') h = rhnSQL.prepare(self._query_reap_pinged_clients) ret = h.execute(online_id=online_id, offline_id=offline_id) if ret: # We have changed something rhnSQL.commit() _query_fetch_clients_to_be_pinged = rhnSQL.Statement("""
class Server(ServerWrapper):

    """ Main Server class """

    def __init__(self, user, arch=None, org_id=None):
        ServerWrapper.__init__(self)
        self.user = user
        # Use the handy TableRow
        self.server = rhnSQL.Row("rhnServer", "id")
        self.server["release"] = ""
        self.server["os"] = "SUSE Linux"
        self.addr = {}
        self.is_rpm_managed = 0
        self.set_arch(arch)
        # We only get this passed in when we create a new
        # entry. Usually a reload will create a dummy entry first and
        # then call self.loadcert()
        if user:
            self.server["org_id"] = user.customer["id"]
        elif org_id:
            self.server["org_id"] = org_id
        self.cert = None
        # Also, at this point we know that this is a real server
        self.type = "REAL"
        self.default_description()
        # custom info values
        self.custom_info = None
        # uuid
        self.uuid = None
        self.virt_uuid = None
        self.registration_number = None

    _query_lookup_arch = rhnSQL.Statement("""
        select sa.id,
               case when at.label = 'rpm' then 1 else 0 end is_rpm_managed
          from rhnServerArch sa,
               rhnArchType at
         where sa.label = :archname
           and sa.arch_type_id = at.id
    """)

    def set_arch(self, arch):
        """Normalize and record the server architecture.

        Resolves the rhnServerArch id and whether packages on this arch
        are RPM-managed.  A None arch is accepted and left unresolved;
        an unknown arch raises rhnFault(24).
        """
        self.archname = arch
        # try to detect the archid
        if arch is None:
            return
        arch = rhnLib.normalize_server_arch(arch)
        h = rhnSQL.prepare(self._query_lookup_arch)
        h.execute(archname=arch)
        data = h.fetchone_dict()
        if not data:
            # Log it to disk, it may show interesting things
            log_error("Attempt to create server with invalid arch `%s'" % arch)
            raise rhnFault(24, _("Architecture `%s' is not supported") % arch)
        self.server["server_arch_id"] = data["id"]
        self.is_rpm_managed = data['is_rpm_managed']

    # set the default description...
    def default_description(self):
        """ Build the initial description string from os/release/arch. """
        self.server["description"] = "Initial Registration Parameters:\n"\
                                     "OS: %s\n"\
                                     "Release: %s\n"\
                                     "CPU Arch: %s" % (
                                         self.server["os"],
                                         self.server["release"],
                                         self.archname)

    def __repr__(self):
        # misa: looks like id can return negative numbers, so use %d
        # instead of %x
        # For the gory details,
        # http://mail.python.org/pipermail/python-dev/2005-February/051559.html
        return "<Server Class at %d: %s>\n" % (
            id(self), {
                "self.cert": self.cert,
                "self.server": self.server.data,
            })

    __str__ = __repr__

    def _get_active_org_admins(self, org_id):
        """ Return the logins of enabled org admins for org_id.

        Rows are ordered by user id; callers take the first entry as a
        fallback owner (see system_id()).
        """
        h = rhnSQL.prepare("""
            SELECT login
            FROM web_contact
            WHERE id in (SELECT ugm.user_id
                         FROM rhnUserGroupMembers ugm
                         WHERE ugm.user_group_id = (SELECT id
                                                    FROM rhnUserGroup
                                                    WHERE org_id = :org_id
                                                    AND group_type = (SELECT id
                                                                      FROM rhnUserGroupType
                                                                      WHERE label = 'org_admin'))
                         AND exists (select wc.id from rhnwebcontactenabled wc where wc.id = ugm.user_id)
                         ORDER BY ugm.user_id);
        """)
        h.execute(org_id=org_id)
        rows = h.fetchall_dict()
        return rows

    # Return a Digital Certificate that can be placed in a file on the
    # client side.
    def system_id(self):
        """ Return the serialized client certificate for this server.

        Lazily instantiates and caches a Certificate built from the
        current server row; when no owning user is known, falls back to
        the first active org admin of the server's org.
        """
        log_debug(3, self.server, self.cert)
        if self.cert is None:
            # need to instantiate it
            cert = Certificate()
            cert["system_id"] = self.server["digital_server_id"]
            cert["os_release"] = self.server["release"]
            cert["operating_system"] = self.server["os"]
            cert["architecture"] = self.archname
            cert["profile_name"] = self.server["name"]
            cert["description"] = self.server["description"]
            if not self.user:
                log_debug(1, "The username is not available. Taking an active "
                          "administrator from the same organization")
                self.user = rhnUser.search(
                    self._get_active_org_admins(
                        self.server["org_id"])[0]["login"])
            cert["username"] = self.user.contact["login"]
            cert["type"] = self.type
            cert.set_secret(self.server["secret"])
            self.cert = cert
        return self.cert.certificate()

    # return the id of this system
    def getid(self):
        """ Return the server id, reserving a new one if none exists yet. """
        if not self.server.has_key("id"):
            sysid = rhnSQL.Sequence("rhn_server_id_seq")()
            self.server["digital_server_id"] = "ID-%09d" % sysid
            # we can't reset the id column, so we need to poke into
            # internals. kind of illegal, but it works...
            self.server.data["id"] = (sysid, 0)
        else:
            sysid = self.server["id"]
        return sysid

    # change the base channel of a server
    def change_base_channel(self, new_rel, suse_products=None):
        """ Re-point the server at the channels for release new_rel.

        Preserves child-channel subscriptions when the base channel is
        unchanged, leaves custom base channels alone (bz 442355), and
        records a history entry describing what happened.  Returns 1.
        """
        log_debug(3, self.server["id"], new_rel)
        old_rel = self.server["release"]
        current_channels = rhnChannel.channels_for_server(self.server["id"])
        # Extract the base channel off of
        old_base = [x for x in current_channels if not x['parent_channel']]

        # Quick sanity check
        base_channels_count = len(old_base)
        if base_channels_count == 1:
            old_base = old_base[0]
        elif base_channels_count == 0:
            old_base = None
        else:
            raise rhnException(
                "Server %s subscribed to multiple base channels" %
                (self.server["id"], ))

        # bz 442355
        # Leave custom base channels alone, don't alter any of the channel subscriptions
        if not CFG.RESET_BASE_CHANNEL and old_base and rhnChannel.isCustomChannel(
                old_base["id"]):
            log_debug(
                3,
                "Custom base channel detected, will not alter channel subscriptions"
            )
            self.server["release"] = new_rel
            self.server.save()
            msg = """The SUSE Manager Update Agent has detected a change in the base version of the operating system running on your system, additionally you are subscribed to a custom channel as your base channel. Due to this configuration your channel subscriptions will not be altered. 
"""
            self.add_history(
                "Updated system release from %s to %s" % (old_rel, new_rel),
                msg)
            self.save_history_byid(self.server["id"])
            return 1

        s = rhnChannel.LiteServer().init_from_server(self)
        s.release = new_rel
        s.arch = self.archname
        if suse_products:
            s.suse_products = suse_products
        # Let get_server_channels deal with the errors and raise rhnFault
        target_channels = rhnChannel.guess_channels_for_server(s, none_ok=True)
        if target_channels:
            target_base = [
                x for x in target_channels if not x['parent_channel']
            ][0]
        else:
            target_base = None

        channels_to_subscribe = []
        channels_to_unsubscribe = []
        if old_base and target_base and old_base['id'] == target_base['id']:
            # Same base channel. Preserve the currently subscribed child
            # channels, just add the ones that are missing
            hash = {}
            for c in current_channels:
                hash[c['id']] = c
            for c in target_channels:
                channel_id = c['id']
                if channel_id in hash:
                    # Already subscribed to this one
                    del hash[channel_id]
                    continue
                # Have to subscribe to this one
                channels_to_subscribe.append(c)
            # We don't want to lose subscriptions to prior channels, so don't
            # do anything with hash.values()
        else:
            # Different base channel
            channels_to_unsubscribe = current_channels
            channels_to_subscribe = target_channels

        rhnSQL.transaction("change_base_channel")
        self.server["release"] = new_rel
        self.server.save()
        if not (channels_to_subscribe or channels_to_unsubscribe):
            # Nothing to do, just add the history entry
            self.add_history("Updated system release from %s to %s" %
                             (old_rel, new_rel))
            self.save_history_byid(self.server["id"])
            return 1

        # XXX: need a way to preserve existing subscriptions to
        # families so we can restore access to non-public ones.
        rhnChannel.unsubscribe_channels(self.server["id"],
                                        channels_to_unsubscribe)
        rhnChannel.subscribe_channels(self.server["id"],
                                      channels_to_subscribe)
        # now that we changed, recompute the errata cache for this one
        rhnSQL.Procedure("queue_server")(self.server["id"])
        # Make a history note
        sub_channels = rhnChannel.channels_for_server(self.server["id"])
        if sub_channels:
            channel_list = [a["name"] for a in sub_channels]
            msg = """The SUSE Manager Update Agent has detected a change in the base version of the operating system running on your system and has updated your channel subscriptions to reflect that. Your server has been automatically subscribed to the following channels:\n%s\n""" % (string.join(channel_list, "\n"), )
        else:
            msg = """*** ERROR: *** While trying to subscribe this server to software channels: There are no channels serving release %s""" % new_rel
        self.add_history(
            "Updated system release from %s to %s" % (old_rel, new_rel), msg)
        self.save_history_byid(self.server["id"])
        return 1

    def take_snapshot(self, reason):
        """ Snapshot this server's state, tagging it with reason. """
        return server_lib.snapshot_server(self.server['id'], reason)

    # returns true iff the base channel assigned to this system
    # has been end-of-life'd
    def base_channel_is_eol(self):
        """ Return 1 when the assigned base channel is past end-of-life,
        None otherwise. """
        h = rhnSQL.prepare("""
            select 1
            from rhnChannel c, rhnServerChannel sc
            where sc.server_id = :server_id
              and sc.channel_id = c.id
              and c.parent_channel IS NULL
              and current_timestamp - c.end_of_life > 0
        """)
        h.execute(server_id=self.getid())
        ret = h.fetchone_dict()
        if ret:
            return 1
        return None

    # All custom info (key/value) rows attached to this server.
    _query_server_custom_info = rhnSQL.Statement("""
        select cdk.label, scdv.value
        from rhnCustomDataKey cdk, rhnServerCustomDataValue scdv
        where scdv.server_id = :server_id
          and scdv.key_id = cdk.id
    """)

    def load_custom_info(self):
        """ Populate self.custom_info with this server's custom key/values. """
        self.custom_info = {}

        h = rhnSQL.prepare(self._query_server_custom_info)
        h.execute(server_id=self.getid())
        rows = h.fetchall_dict()

        if not rows:
            log_debug(4, "no custom info values")
            return

        for row in rows:
            self.custom_info[row['label']] = row['value']
    # load additional server information from the token definition
    def load_token(self):
        """ Fill in creator/org/contact-method fields from the current
        registration token (rhnFlags "registration_token").

        Returns 0 whether or not a token was present.
        """
        # Fetch token
        tokens_obj = rhnFlags.get("registration_token")
        if not tokens_obj:
            # No tokens present
            return 0
        # make sure we have reserved a server_id. most likely if this
        # is a new server object (just created from
        # registration.new_system) then we have no associated a
        # server["id"] yet -- and getid() will reserve that for us.
        self.getid()
        # pull in the extra information needed to fill in the
        # required registration fields using tokens
        user_id = tokens_obj.get_user_id()
        org_id = tokens_obj.get_org_id()
        self.user = rhnUser.User("", "")
        if user_id is not None:
            self.user.reload(user_id)
        self.server["creator_id"] = user_id
        self.server["org_id"] = org_id
        self.server["contact_method_id"] = tokens_obj.get_contact_method_id()
        return 0

    # perform the actions required by the token (subscribing to
    # channels, server groups, etc)
    def use_token(self):
        """ Apply the registration token's actions and record history.

        Delegates the actual work to server_token.process_token();
        flushes pending actions on reactivation outside of a kickstart.
        Returns 0.
        """
        # Fetch token
        tokens_obj = rhnFlags.get("registration_token")
        if not tokens_obj:
            # No token present
            return 0
        is_rereg_token = tokens_obj.is_rereg_token
        # We get back a history of what is being done in the
        # registration process
        history = server_token.process_token(self.server, self.archname,
                                             tokens_obj, self.virt_type)
        if is_rereg_token:
            event_name = "Reactivation via Token"
            event_text = "System reactivated"
        else:
            event_name = "Subscription via Token"
            event_text = "System created"
        token_name = tokens_obj.get_names()
        # now record that history nicely
        self.add_history(
            event_name, "%s with token <strong>%s</strong><br />\n%s" %
            (event_text, token_name, history))
        self.save_history_byid(self.server["id"])
        # 6/23/05 wregglej 157262, use get_kickstart session_id() to see if we're in the middle of a kickstart.
        ks_id = tokens_obj.get_kickstart_session_id()
        # 4/5/05 wregglej, Added for bugzilla: 149932. Actions need to be flushed on reregistration.
        # 6/23/05 wregglej 157262, don't call flush_actions() if we're in the middle of a kickstart.
        # It would cause all of the remaining kickstart actions to get flushed, which is bad.
        if is_rereg_token and ks_id is None:
            self.flush_actions()
        # XXX: will need to call self.save() later to commit all that
        return 0

    def disable_token(self):
        """ Disable the re-registration token tied to this server, if any. """
        tokens_obj = rhnFlags.get('registration_token')
        if not tokens_obj:
            # Nothing to do
            return
        if not tokens_obj.is_rereg_token:
            # Not a re-registration token - nothing to do
            return
        # Re-registration token - we know for sure there is only one
        token_server_id = tokens_obj.get_server_id()
        if token_server_id != self.getid():
            # Token is not associated with this server (it may actually not be
            # associated with any server)
            return
        server_token.disable_token(tokens_obj)
        # save() will commit this

    # Auto-entitlement: attempt to entitle this server to the highest
    # entitlement that is available
    def autoentitle(self):
        """ Try each entitlement in the hierarchy; raise
        rhnNoSystemEntitlementsException when none could be applied. """
        entitlement_hierarchy = ['enterprise_entitled']

        any_base_entitlements = 0

        for entitlement in entitlement_hierarchy:
            try:
                self._entitle(entitlement)
                any_base_entitlements = 1
            except rhnSQL.SQLSchemaError:
                e = sys.exc_info()[1]
                if e.errno == 20287:
                    # ORA-20287: (invalid_entitlement) - The server can not be
                    # entitled to the specified level
                    #
                    # ignore for now, since any_base_entitlements will throw
                    # an error at the end if not set
                    continue

                # Should not normally happen
                log_error("Failed to entitle", self.server["id"], entitlement,
                          e.errmsg)
                raise_with_tb(
                    server_lib.rhnSystemEntitlementException(
                        "Unable to entitle"), sys.exc_info()[2])
            except rhnSQL.SQLError:
                e = sys.exc_info()[1]
                log_error("Failed to entitle", self.server["id"], entitlement,
                          str(e))
                raise_with_tb(
                    server_lib.rhnSystemEntitlementException(
                        "Unable to entitle"), sys.exc_info()[2])
        else:
            # for/else: runs when the loop was not broken out of
            if any_base_entitlements:
                # All is fine
                return
            else:
                raise_with_tb(server_lib.rhnNoSystemEntitlementsException,
                              sys.exc_info()[2])

    def _entitle(self, entitlement):
        """ Grant the given entitlement unless the server already has it. """
        system_entitlements = server_lib.check_entitlement(self.server["id"])
        system_entitlements = list(system_entitlements.keys())
        if entitlement not in system_entitlements:
            entitle_server = rhnSQL.Procedure(
                "rhn_entitlements.entitle_server")
            entitle_server(self.server['id'], entitlement)

    def create_perm_cache(self):
        """ Rebuild the permission cache for this server. """
        log_debug(4)
        create_perms = rhnSQL.Procedure("rhn_cache.update_perms_for_server")
        create_perms(self.server['id'])

    def gen_secret(self):
        """ Generate a fresh shared secret for this server. """
        # Running this invalidates the cert
        self.cert = None
        # calls the module-level gen_secret() helper (not this method)
        self.server["secret"] = gen_secret()

    _query_update_uuid = rhnSQL.Statement("""
        update rhnServerUuid
        set uuid = :uuid
        where server_id = :server_id
    """)
    _query_insert_uuid = rhnSQL.Statement("""
        insert into rhnServerUuid (server_id, uuid)
        values (:server_id, :uuid)
    """)

    def update_uuid(self, uuid, commit=1):
        """ Upsert the client uuid for this server (rhnServerUuid).

        The uuid is stringified and truncated to the column length; a
        falsy uuid is a no-op.  Commits unless commit=0.
        """
        log_debug(3, uuid)
        # XXX Should determine a way to do this dynamically
        uuid_col_length = 36
        if uuid is not None:
            uuid = str(uuid)
        if not uuid:
            log_debug('Nothing to do')
            return
        uuid = uuid[:uuid_col_length]
        server_id = self.server['id']
        log_debug(4, "Trimmed uuid", uuid, server_id)

        # Update this server's UUID (unique client identifier)
        h = rhnSQL.prepare(self._query_update_uuid)
        ret = h.execute(server_id=server_id, uuid=uuid)
        log_debug(4, "execute returned", ret)

        if ret != 1:
            # Row does not exist, have to create it
            h = rhnSQL.prepare(self._query_insert_uuid)
            h.execute(server_id=server_id, uuid=uuid)

        if commit:
            rhnSQL.commit()

    def handle_virtual_guest(self):
        """ Notify the virtualization layer when this server is a guest. """
        # Handle virtualization specific bits
        if self.virt_uuid and self.virt_type:
            rhnVirtualization._notify_guest(self.getid(), self.virt_uuid,
                                            self.virt_type)

    # Save this record in the database
    def __save(self, channel):
        """ Persist the server row and all registration side effects.

        For a new server: creates the row, subscribes channels (either
        the explicit channel or guessed ones), auto-entitles and joins
        default groups when no token is in play.  Always refreshes the
        uuid, perm cache and profile data.  Returns 0.
        """
        tokens_obj = rhnFlags.get("registration_token")

        if self.server.real:
            server_id = self.server["id"]
            self.server.save()
        else:  # create new entry
            self.gen_secret()
            server_id = self.getid()
            org_id = self.server["org_id"]
            if self.user:
                user_id = self.user.getid()
            else:
                user_id = None
            # some more default values
            self.server["auto_update"] = "N"
            if self.user and not self.server.has_key("creator_id"):
                # save the link to the user that created it if we have
                # that information
                self.server["creator_id"] = self.user.getid()
            # and create the server entry
            self.server.create(server_id)
            server_lib.create_server_setup(server_id, org_id)

            self.handle_virtual_guest()

            # if we're using a token, then the following channel
            # subscription request can allow no matches since the
            # token code will fix up or fail miserably later.
            # subscribe the server to applicable channels

            # bretm 02/17/2007 -- TODO: refactor activation key codepaths
            # to allow us to not have to pass in none_ok=1 in any case
            #
            # This can now throw exceptions which will be caught at a higher level
            if channel is not None:
                channel_info = dict(rhnChannel.channel_info(channel))
                log_debug(4, "eus channel id %s" % str(channel_info))
                rhnChannel.subscribe_sql(server_id, channel_info['id'])
            else:
                # NOTE(review): the token object itself is passed as the
                # none_ok flag -- presumably only its truthiness matters;
                # confirm against subscribe_server_channels
                rhnChannel.subscribe_server_channels(self,
                                                     none_ok=tokens_obj,
                                                     user_id=user_id)

            if not tokens_obj:
                # Attempt to auto-entitle, can throw the following exceptions:
                #   rhnSystemEntitlementException
                #   rhnNoSystemEntitlementsException
                self.autoentitle()

                # If a new server that was registered by an user (i.e. not
                # with a registration token), look for this user's default
                # groups
                self.join_groups()

            server_lib.join_rhn(org_id)

        # Update virtual guest attributes on re-registration
        if getattr(tokens_obj, "is_rereg_token", False):
            self.handle_virtual_guest()

        # Update the uuid - but don't commit yet
        self.update_uuid(self.uuid, commit=0)

        self.create_perm_cache()
        # And save the extra profile data...
        self.save_suse_products_byid(server_id)
        self.save_packages_byid(server_id, schedule=1)
        self.save_hardware_byid(server_id)
        self.save_history_byid(server_id)
        return 0

    # This is a wrapper for the above class that allows us to rollback
    # any changes in case we don't succeed completely
    def save(self, commit=1, channel=None):
        """ Transactional wrapper around __save(); rolls back to the
        "save_server" checkpoint on any failure and re-raises. """
        log_debug(3)
        # attempt to preserve pending changes before we were called,
        # so we set up our own transaction checkpoint
        rhnSQL.transaction("save_server")
        try:
            self.__save(channel)
        except:
            # roll back to what we have before and raise again
            rhnSQL.rollback("save_server")
            # shoot the exception up the chain
            raise
        else:
            # if we want to commit, commit all pending changes
            if commit:
                rhnSQL.commit()
                try:
                    search = SearchNotify()
                    search.notify()
                except Exception:
                    e = sys.exc_info()[1]
                    log_error("Exception caught from SearchNotify.notify().",
                              e)
        return 0

    # Reload the current configuration from database using a server id.
    def reload(self, server, reload_all=0):
        """ Reload this object's state from the database row `server`.

        Raises rhnFault(29) when the row is missing.  With reload_all,
        also reloads packages and hardware (returns -1 on failure).
        Returns 0 on success.
        """
        log_debug(4, server, "reload_all = %d" % reload_all)

        if not self.server.load(int(server)):
            log_error("Could not find server record for reload", server)
            raise rhnFault(29, "Could not find server record in the database")
        self.cert = None
        # it is lame that we have to do this
        h = rhnSQL.prepare("""
            select label from rhnServerArch where id = :archid
        """)
        h.execute(archid=self.server["server_arch_id"])
        data = h.fetchone_dict()
        if not data:
            raise rhnException(
                "Found server with invalid numeric "
                "architecture reference", self.server.data)
        self.archname = data['label']
        # we don't know this one anymore (well, we could look for, but
        # why would we do that?)
        self.user = None

        self.addr.update(self.fetch_addr())

        # XXX: Fix me
        if reload_all:
            if not self.reload_packages_byid(self.server["id"]) == 0:
                return -1
            if not self.reload_hardware_byid(self.server["id"]) == 0:
                return -1

        return 0

    # Reload primary IP information from database
    def fetch_addr(self):
        """ Return a dict with the primary IPv4/IPv6 addresses
        (keys 'ipaddr' / 'ip6addr' when present). """
        server = self.getid()
        ret = {}
        h = rhnSQL.prepare("""
            select address as ipaddr
            from rhnservernetinterface rsni
            join rhnservernetaddress4 rsna4 on rsna4.interface_id = rsni.id
            where is_primary = 'Y' and server_id = :serverid
        """)
        h.execute(serverid=server)
        data = h.fetchone_dict()
        if data:
            ret.update(data)

        h = rhnSQL.prepare("""
            select address as ip6addr
            from rhnservernetinterface rsni
            join rhnservernetaddress6 rsna4 on rsna4.interface_id = rsni.id
            where is_primary = 'Y' and server_id = :serverid
        """)
        h.execute(serverid=server)
        data = h.fetchone_dict()
        if data:
            ret.update(data)
        return ret

    # Use the values we find in the cert to cause a reload of this
    # server from the database.
    def loadcert(self, cert, load_user=1):
        """ Reload this server from a (pre-verified) client Certificate.

        Returns 0 on success, -1 when the cert or server row is
        invalid, or reload()'s error code.
        """
        log_debug(4, cert)
        # certificate is presumed to be already verified
        if not isinstance(cert, Certificate):
            return -1
        # reload the whole thing based on the cert data
        server = cert["system_id"]
        row = server_lib.getServerID(server)
        if row is None:
            return -1
        sid = row["id"]
        # standard reload based on an ID
        ret = self.reload(sid)
        if not ret == 0:
            return ret
        # the reload() will never be able to fill in the username. It
        # would require from the database standpoint insuring that for
        # a given server we can have only one owner at any given time.
        # cert includes it and it's valid because it has been verified
        # through checksuming before we got here
        self.user = None

        # Load the user if at all possible. If it's not possible,
        # self.user will be None, which should be a handled case wherever
        # self.user is used.
        if load_user:
            # Load up the username associated with this profile
            self.user = rhnUser.search(cert["username"])

        # 4/27/05 wregglej - Commented out this block because it was causing problems
        # with rhn_check/up2date when the user that registered the system was deleted.
        # if not self.user:
        #    log_error("Invalid username for server id",
        #              cert["username"], server, cert["profile_name"])
        #    raise rhnFault(9, "Invalid username '%s' for server id %s" %(
        #        cert["username"], server))

        # XXX: make sure that the database thinks that the server
        # registrnt is the same as this certificate thinks. The
        # certificate passed checksum checks, but it never hurts to be
        # too careful now with satellites and all.
        return 0

    # Is this server entitled?
    def check_entitlement(self):
        """ Return the entitlement dict, or None when the server has no id. """
        if not self.server.has_key("id"):
            return None
        log_debug(3, self.server["id"])
        return server_lib.check_entitlement(self.server['id'])

    def checkin(self, commit=1):
        """ convenient wrapper for these thing until we clean the code up """
        if not self.server.has_key("id"):
            return 0  # meaningless if rhnFault not raised
        return server_lib.checkin(self.server["id"], commit)

    def throttle(self):
        """ convenient wrapper for these thing until we clean the code up """
        if not self.server.has_key("id"):
            return 1  # meaningless if rhnFault not raised
        return server_lib.throttle(self.server)

    def set_qos(self):
        """ convenient wrapper for these thing until we clean the code up """
        if not self.server.has_key("id"):
            return 1  # meaningless if rhnFault not raised
        return server_lib.set_qos(self.server["id"])

    def join_groups(self):
        """ For a new server, join server groups """
        # Sanity check - we should always have a user
        if not self.user:
            raise rhnException("User not specified")

        server_id = self.getid()
        user_id = self.user.getid()

        h = rhnSQL.prepare("""
            select system_group_id
            from rhnUserDefaultSystemGroups
            where user_id = :user_id
        """)
        h.execute(user_id=user_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            server_group_id = row['system_group_id']
            log_debug(5, "Subscribing server to group %s" % server_group_id)
            server_lib.join_server_group(server_id, server_group_id)

    def fetch_registration_message(self):
        """ Return the registration message for this server's channels. """
        return rhnChannel.system_reg_message(self)

    def process_kickstart_info(self):
        """ Finish kickstart bookkeeping after a token registration:
        terminate stale sessions, flush actions and schedule the
        kickstart sync / tools-channel / virt-package actions. """
        log_debug(4)
        tokens_obj = rhnFlags.get("registration_token")
        if not tokens_obj:
            log_debug(4, "no registration token found")
            # Nothing to do here
            return

        # If there are kickstart sessions associated with this system (other
        # than, possibly, the current one), mark them as failed
        history = server_kickstart.terminate_kickstart_sessions(self.getid())
        for k, v in history:
            self.add_history(k, v)

        kickstart_session_id = tokens_obj.get_kickstart_session_id()
        if kickstart_session_id is None:
            log_debug(
                4, "No kickstart_session_id associated with token %s (%s)" %
                (tokens_obj.get_names(), tokens_obj.tokens))
            # Nothing to do here
            return

        # Flush server actions
        self.flush_actions()

        server_id = self.getid()
        action_id = server_kickstart.schedule_kickstart_sync(
            server_id, kickstart_session_id)
        server_kickstart.subscribe_to_tools_channel(server_id,
                                                    kickstart_session_id)
        server_kickstart.schedule_virt_pkg_install(server_id,
                                                   kickstart_session_id)
        # Update the next action to the newly inserted one
        server_kickstart.update_ks_session_table(kickstart_session_id,
                                                 'registered', action_id,
                                                 server_id)

    def flush_actions(self):
        """ Cancel all Queued/Picked Up actions for this server. """
        server_id = self.getid()
        h = rhnSQL.prepare("""
            select action_id
            from rhnServerAction
            where server_id = :server_id
            and status in (0, 1) -- Queued or Picked Up
        """)
        h.execute(server_id=server_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            action_id = row['action_id']
            rhnAction.update_server_action(
                server_id=server_id,
                action_id=action_id,
                status=3,
                result_code=-100,
                result_message=
                "Action canceled: system kickstarted or reregistered"
            )  # 4/6/05 wregglej, added the "or reregistered" part.
def server_locked(self): """ Returns true is the server is locked (for actions that are blocked) """ server_id = self.getid() h = rhnSQL.prepare(""" select 1 from rhnServerLock where server_id = :server_id """) h.execute(server_id=server_id) row = h.fetchone_dict() if row: return 1 return 0 def register_push_client(self): """ insert or update rhnPushClient for this server_id """ server_id = self.getid() ret = server_lib.update_push_client_registration(server_id) return ret def register_push_client_jid(self, jid): """ update the JID in the corresponing entry from rhnPushClient """ server_id = self.getid() ret = server_lib.update_push_client_jid(server_id, jid) return ret
class Applet(rhnHandler):
    """ XMLRPC handler exposing the RHN applet entry points. """

    def __init__(self):
        rhnHandler.__init__(self)
        # Exposed Errata functions:
        self.functions = []
        self.functions.append("poll_status")
        self.functions.append("poll_packages")
        self.functions.append("tie_uuid")
        self.functions.append("has_base_channel")

    # Most recently modified server row tied to this uuid.
    _query_lookup_server = rhnSQL.Statement("""
        select s.id
        from rhnServer s, rhnServerUuid su
        where su.uuid = :uuid
          and su.server_id = s.id
        order by modified desc
    """)
    # Base (parentless) channel the server is subscribed to.
    _query_lookup_base_channel = rhnSQL.Statement("""
        select c.label
        from rhnChannel c, rhnServerChannel sc
        where sc.server_id = :server_id
          and sc.channel_id = c.id
          and c.parent_channel is null
    """)

    def has_base_channel(self, uuid):
        """ Return 1 when the server identified by uuid has a base
        channel, 0 otherwise; raises rhnFault(140) for unknown uuids. """
        log_debug(1, uuid)
        # Verifies if a system has a base channel
        h = rhnSQL.prepare(self._query_lookup_server)
        h.execute(uuid=uuid)
        row = h.fetchone_dict()
        if not row:
            raise rhnFault(
                140,
                _("Your system was not found in the RHN database"),
                explain=0)
        server_id = row['id']
        h = rhnSQL.prepare(self._query_lookup_base_channel)
        h.execute(server_id=server_id)
        row = h.fetchone_dict()
        if row:
            return 1
        return 0

    # ties a uuid to an rhnServer.id
    def tie_uuid(self, systemid, uuid):
        """ Associate uuid with the server authenticated by systemid.

        Returns 1 on success, None when uuid is empty.
        """
        log_debug(1, uuid)
        systemid = str(systemid)
        uuid = str(uuid)

        server = self.auth_system(systemid)
        if not uuid:
            # Nothing to do
            return

        server.update_uuid(uuid)
        return 1

    # return our status - for now a dummy function
    def poll_status(self):
        """ Return the checkin interval (with random jitter) and a
        static 'normal' server status. """
        checkin_interval = (CFG.CHECKIN_INTERVAL +
                            random.random() * CFG.CHECKIN_INTERVAL_MAX_OFFSET)
        return {
            'checkin_interval': int(checkin_interval),
            'server_status': 'normal'
        }

    # poll for latest packages for the RHN Applet
    def poll_packages(self, release, server_arch, timestamp=0, uuid=None):
        """ Return the newest packages (with errata info) for the
        channels matching this client.

        NOTE: on the "big" code path the return value is an
        already-XMLRPC-encoded string (the "XMLRPC-Encoded-Response"
        flag is set), and it is cached keyed on the channel ids.
        When the client's timestamp is current, returns
        {'use_cached_copy': 1} instead.
        """
        log_debug(1, release, server_arch, timestamp, uuid)

        # make sure we're dealing with strings here
        release = str(release)
        server_arch = rhnLib.normalize_server_arch(server_arch)
        timestamp = str(timestamp)
        uuid = str(uuid)

        # get a list of acceptable channels
        channel_list = []
        channel_list = rhnChannel.applet_channels_for_uuid(uuid)

        # it's possible the tie between uuid and rhnServer.id wasn't yet
        # made, default to normal behavior
        if not channel_list:
            channel_list = rhnChannel.get_channel_for_release_arch(
                release, server_arch)
            channel_list = [channel_list]
        # bork if no channels returned
        if not channel_list:
            log_debug(
                8, "No channels for release = '%s', arch = '%s', uuid = '%s'"
                % (release, server_arch, uuid))
            return {'last_modified': 0, 'contents': []}

        last_channel_changed_ts = max(
            [a["last_modified"] for a in channel_list])

        # make satellite content override a cache caused by hosted
        last_channel_changed_ts = str(LongType(last_channel_changed_ts) + 1)

        # gotta be careful about channel unsubscriptions...
        client_cache_invalidated = None

        # we return rhnServer.channels_changed for each row
        # in the satellite case, pluck it off the first...
        if "server_channels_changed" in channel_list[0]:
            sc_ts = channel_list[0]["server_channels_changed"]

            if sc_ts and (sc_ts >= last_channel_changed_ts):
                client_cache_invalidated = 1

        if (last_channel_changed_ts <= timestamp) and (not client_cache_invalidated):
            # XXX: I hate these freaking return codes that return
            # different members in the dictinary depending on what
            # sort of data you get
            log_debug(3, "Client has current data")
            return {'use_cached_copy': 1}

        # we'll have to return something big - compress
        rhnFlags.set("compress_response", 1)
        # Mark the response as being already XMLRPC-encoded
        rhnFlags.set("XMLRPC-Encoded-Response", 1)

        # next, check the cache if we have something with this timestamp
        label_list = [str(a["id"]) for a in channel_list]
        label_list.sort()
        log_debug(4, "label_list", label_list)
        cache_key = "applet-poll-%s" % string.join(label_list, "-")

        ret = rhnCache.get(cache_key, last_channel_changed_ts)
        if ret:  # we have a good entry with matching timestamp
            log_debug(3, "Cache HIT for", cache_key)
            return ret

        # damn, need to do some real work from chip's requirements:
        # The package list should be an array of hashes with the keys
        # nvre, name, version, release, epoch, errata_advisory,
        # errata_id, with the errata fields being empty strings if the
        # package isn't from an errata.
        ret = {'last_modified': last_channel_changed_ts, 'contents': []}

        # we search for packages only in the allowed channels - build
        # the SQL helper string and dictionary to make the foo IN (
        # list ) constructs use bind variables
        qlist = []
        qdict = {}
        for c in channel_list:
            v = c["id"]
            k = "channel_%s" % v
            qlist.append(":%s" % k)
            qdict[k] = v
        qlist = string.join(qlist, ", ")

        # This query is kind of big. One of these days I'm gonna start
        # pulling them out and transforming them into views. We can
        # also simulate this using several functions exposed out of
        # rhnChannel, but there is no difference in speed because we
        # need to do more than one query; besides, we cache the hell
        # out of it
        h = rhnSQL.prepare("""
            select distinct
                pn.name,
                pe.version,
                pe.release,
                pe.epoch,
                e_sq.errata_advisory,
                e_sq.errata_synopsis,
                e_sq.errata_id
            from rhnPackageName pn,
                 rhnPackageEVR pe,
                 rhnChannelNewestPackage cnp
            left join (
                select sq_e.id as errata_id,
                       sq_e.synopsis as errata_synopsis,
                       sq_e.advisory as errata_advisory,
                       sq_ep.package_id
                from rhnErrata sq_e,
                     rhnErrataPackage sq_ep,
                     rhnChannelErrata sq_ce
                where sq_ce.errata_id = sq_ep.errata_id
                    and sq_ce.errata_id = sq_e.id
                    and sq_ce.channel_id in ( %s )
            ) e_sq
            on cnp.package_id = e_sq.package_id
            where cnp.channel_id in ( %s )
                and cnp.name_id = pn.id
                and cnp.evr_id = pe.id
        """ % (qlist, qlist))
        h.execute(**qdict)

        plist = h.fetchall_dict()

        if not plist:
            # We've set XMLRPC-Encoded-Response above
            ret = xmlrpclib.dumps((ret, ), methodresponse=1)
            return ret

        contents = {}

        # keep only the highest-versioned entry per package name
        for p in plist:
            for k in list(p.keys()):
                if p[k] is None:
                    p[k] = ""
            p["nevr"] = "%s-%s-%s:%s" % (p["name"], p["version"],
                                         p["release"], p["epoch"])
            p["nvr"] = "%s-%s-%s" % (p["name"], p["version"], p["release"])

            pkg_name = p["name"]

            if pkg_name in contents:
                stored_pkg = contents[pkg_name]

                s = [
                    stored_pkg["name"], stored_pkg["version"],
                    stored_pkg["release"], stored_pkg["epoch"]
                ]

                n = [p["name"], p["version"], p["release"], p["epoch"]]

                log_debug(7, "comparing vres", s, n)

                if rhn_rpm.nvre_compare(s, n) < 0:
                    log_debug(7, "replacing %s with %s" % (pkg_name, p))
                    contents[pkg_name] = p
                else:
                    # already have a higher vre stored...
                    pass
            else:
                log_debug(7, "initial store for %s" % pkg_name)
                contents[pkg_name] = p

        ret["contents"] = list(contents.values())

        # save it in the cache
        # We've set XMLRPC-Encoded-Response above
        ret = xmlrpclib.dumps((ret, ), methodresponse=1)
        rhnCache.set(cache_key, ret, last_channel_changed_ts)

        return ret
class Queue(rhnHandler):

    """ XMLRPC queue functions that we will provide for the outside world.

    NOTE(review): this appears to be an OLDER revision of the Queue class --
    a second, modernized Queue class follows later in this file. This copy
    uses Python-2-only syntax (``except X, e``, three-expression ``raise``,
    ``dict.has_key``) and its ``get()`` method ends abruptly without the
    pickup/commit success path present in the later copy. Verify whether
    this block is dead code that should be removed.
    """

    def __init__(self):
        """ Add a list of functions we are willing to server out. """
        rhnHandler.__init__(self)
        self.functions.append('get')
        self.functions.append('get_future_actions')
        self.functions.append('length')
        self.functions.append('submit')

        # XXX I am not proud of this. There should be a generic way to map
        # the client's error codes into success status codes
        # Maps action type -> {client result code -> None} for codes that
        # should be treated as success even though they are non-zero.
        self.action_type_completed_codes = {
            'errata.update': {
                39: None,
            },
        }

    def __getV1(self, action):
        """ Fetches old queued actions for the client version 1.

            Returns {'id', 'version', 'action'} where 'action' is an
            xmlrpclib-encoded blob; raises InvalidAction for methods the
            v1 client cannot handle.
        """
        log_debug(3, self.server_id)
        actionId = action['id']
        method = action["method"]
        if method == 'packages.update':
            xml = self.__packageUpdate(actionId)
        elif method == 'errata.update':
            xml = self.__errataUpdate(actionId)
        elif method == 'hardware.refresh_list':
            xml = xmlrpclib.dumps(("hardware", ), methodname="client.refresh")
        elif method == 'packages.refresh_list':
            xml = xmlrpclib.dumps(("rpmlist", ), methodname="client.refresh")
        else:
            # Unrecognized, skip
            raise InvalidAction("Action method %s unsupported by "
                                "Update Agent Client" % method)
        # all good
        return {'id': actionId, 'version': 1, 'action': xml}

    def __getV2(self, action, dry_run=0):
        """ Fetches queued actions for the clients version 2+.

            Dispatches to the server.action handler registered for the
            action's method; the handler's result is wrapped into a tuple
            and xmlrpclib-encoded.
        """
        log_debug(3, self.server_id)
        # Get the root dir of this install
        try:
            method = getMethod.getMethod(action['method'],
                                         'server.action')
        except getMethod.GetMethodException:
            Traceback("queue.get V2")
            # Python-2-only raise-with-traceback syntax (see class note).
            raise EmptyAction("Could not get a valid method for %s" % (
                action['method'], )), None, sys.exc_info()[2]
        # Call the method
        result = method(self.server_id, action['id'], dry_run)
        if result is None:
            # None are mapped to the empty list
            result = ()
        elif not isinstance(result, TupleType):
            # Everything other than a tuple is wrapped in a tuple
            result = (result, )
        xmlblob = xmlrpclib.dumps(result, methodname=action['method'])
        log_debug(5, "returning xmlblob for action", xmlblob)
        return {
            'id': action['id'],
            'action': xmlblob,
            'version': action['version'],
        }

    def __update_status(self, status):
        """ Update the runnng kernel and the last boot values for this server
            from the status dictionary passed on queue checkin.

            Record last running kernel and uptime.  Only update
            last_boot if it has changed by more than five minutes. We
            don't know the timezone the server is in. or even if its
            clock is right, but we do know it can properly track seconds
            since it rebooted, and use our own clocks to keep proper
            track of the actual time.
        """
        rhnSQL.set_log_auth_login('CLIENT')

        if status.has_key('uname'):
            kernelver = status['uname'][2]
            if kernelver != self.server.server["running_kernel"]:
                self.server.server["running_kernel"] = kernelver

        # XXX:We should be using Oracle's sysdate() for this management
        # In the case of multiple app servers in mutiple time zones all the
        # results are skewed.
        if status.has_key('uptime'):
            uptime = status['uptime']
            if isinstance(uptime, type([])) and len(uptime):
                # Toss the other values. For now
                uptime = uptime[0]
            try:
                uptime = float(uptime)
            except ValueError:
                # Wrong value passed by the client
                pass
            else:
                last_boot = time.time() - uptime
                # 5-minute tolerance absorbs clock skew between client
                # and server.
                if abs(last_boot - self.server.server["last_boot"]) > 60 * 5:
                    self.server.server["last_boot"] = last_boot

        # this is smart enough to do a NOOP if nothing changed.
        self.server.server.save()

    def __should_snapshot(self):
        """ Return 1 if this server is entitled to have snapshots taken
            before actions run, 0 otherwise.
        """
        log_debug(4, self.server_id, "determining whether to snapshot...")

        entitlements = self.server.check_entitlement()
        if not entitlements.has_key("provisioning_entitled"):
            return 0

        # ok, take the snapshot before attempting this action
        return 1

    def _invalidate_child_actions(self, action_id):
        """ Fail every action that depends (directly or transitively) on
            action_id and mark any kickstart sessions tied to them failed.
            Returns the list of invalidated action ids.
        """
        f_action_ids = rhnAction.invalidate_action(self.server_id, action_id)
        for f_action_id in f_action_ids:
            # Invalidate any kickstart session that depends on this action
            server_kickstart.update_kickstart_session(self.server_id,
                                                      f_action_id, action_status=3,
                                                      kickstart_state='failed',
                                                      next_action_type=None)
        return f_action_ids

    def _invalidate_failed_prereq_actions(self):
        """ Find queued/picked-up actions whose prerequisite action has
            failed, and invalidate them (and their children).
        """
        h = rhnSQL.prepare("""
            select sa.action_id, a.prerequisite
              from rhnServerAction sa, rhnAction a
             where sa.server_id = :server_id
               and sa.action_id = a.id
               and sa.status in (0, 1) -- Queued or picked up
               and a.prerequisite is not null
               and exists (
                   select 1 from rhnServerAction
                    where server_id = sa.server_id
                      and action_id = a.prerequisite
                      and status = 3 -- failed
               )
        """)
        h.execute(server_id=self.server_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            # prereq_action_id is currently unused after unpacking.
            action_id, prereq_action_id = row['action_id'], row['prerequisite']
            self._invalidate_child_actions(action_id)

    # Per-org flag controlling whether clients may pre-fetch ("stage")
    # content for future actions.
    _query_future_enabled = rhnSQL.Statement("""
        select staging_content_enabled
          from rhnOrgConfiguration oc,
               rhnServer s
         where s.org_id = oc.org_id
           and s.id = :server_id
    """)

    def _future_actions_enabled(self):
        """ Returns true if staging content is enabled for this system """
        h = rhnSQL.prepare(self._query_future_enabled)
        h.execute(server_id=self.server_id)
        row = h.fetchone_dict()
        log_debug(4, row["staging_content_enabled"])
        return row["staging_content_enabled"] == "Y"

    # Actions scheduled up to :time_window hours in the future, restricted
    # to the package/errata action types that benefit from pre-staging.
    _query_queue_future = rhnSQL.Statement("""
        select sa.action_id id, a.version,
               sa.remaining_tries, at.label method,
               at.unlocked_only, a.prerequisite
          from rhnServerAction sa,
               rhnAction a,
               rhnActionType at
         where sa.server_id = :server_id
           and sa.action_id = a.id
           and a.action_type = at.id
           and sa.status in (0, 1) -- Queued or picked up
           and a.earliest_action <= current_timestamp + numtodsinterval(:time_window * 3600, 'second') -- Check earliest_action
           and at.label in ('packages.update', 'errata.update',
                            'packages.runTransaction', 'packages.fullUpdate')
         order by a.earliest_action, a.prerequisite nulls first, a.id
    """)

    def get_future_actions(self, system_id, time_window):
        """ return actions which are scheduled within next /time_window/ hours """
        self.auth_system(system_id)
        log_debug(3, "Checking for future actions within %d hours" % time_window)
        result = []
        if self._future_actions_enabled():
            h = rhnSQL.prepare(self._query_queue_future)
            h.execute(server_id=self.server_id, time_window=time_window)
            action = h.fetchone_dict()
            while action:
                log_debug(5, action)
                # dry_run=1: encode the action without marking it picked up
                result.append(self.__getV2(action, dry_run=1))
                action = h.fetchone_dict()
        return result

    # Runnable actions: queued/picked-up, past earliest_action, and with no
    # incomplete prerequisite.
    _query_queue_get = rhnSQL.Statement("""
        select sa.action_id id, a.version,
               sa.remaining_tries, at.label method,
               at.unlocked_only, a.prerequisite
          from rhnServerAction sa,
               rhnAction a,
               rhnActionType at
         where sa.server_id = :server_id
           and sa.action_id = a.id
           and a.action_type = at.id
           and sa.status in (0, 1) -- Queued or picked up
           and a.earliest_action <= current_timestamp -- Check earliest_action
           and not exists (
               select 1
                 from rhnServerAction sap
                where sap.server_id = :server_id
                  and sap.action_id = a.prerequisite
                  and sap.status != 2 -- completed
           )
         order by a.earliest_action, a.prerequisite nulls first, a.id
    """)

    # Probably we need to figure out if we really need to split these two.
    def get(self, system_id, version=1, status={}):
        """ Return the next valid queued action for this system, encoded
            for the given client protocol version; returns "" when none.

            NOTE(review): this method appears TRUNCATED -- the success
            branch that marks the action picked up, plus the final commit
            and return, are missing here (compare the later Queue class in
            this file). Confirm against source history.
        """
        # Authenticate the system certificate
        if CFG.DISABLE_CHECKINS:
            self.update_checkin = 0
        else:
            self.update_checkin = 1
        self.auth_system(system_id)
        log_debug(1, self.server_id, version,
                  "checkins %s" % ["disabled", "enabled"][self.update_checkin])
        if status:
            self.__update_status(status)

        # Update the capabilities list
        rhnCapability.update_client_capabilities(self.server_id)

        # Invalidate failed actions
        self._invalidate_failed_prereq_actions()

        server_locked = self.server.server_locked()
        log_debug(3, "Server locked", server_locked)

        ret = {}
        # get the action. Status codes are currently:
        # 0 Queued
        # 1 Picked Up
        # 2 Completed
        # 3 Failed
        # XXX: we should really be using labels from rhnActionType instead of
        # hard coded type id numbers.
        # We fetch actions whose prerequisites have completed, and actions
        # that don't have prerequisites at all
        h = rhnSQL.prepare(self._query_queue_get)

        should_execute = 1

        # Loop to get a valid action
        # (only one valid action will be dealt with per execution of this function...)
        while 1:
            if should_execute:
                h.execute(server_id=self.server_id)
                should_execute = 0

            # Okay, got an action
            action = h.fetchone_dict()
            if not action:
                # No actions available; bail out
                # Don't forget the commit at the end...
                ret = ""
                break
            action_id = action['id']
            log_debug(4, "Checking action %s" % action_id)
            # okay, now we have the action - process it.
            if action['remaining_tries'] < 1:
                log_debug(4, "Action %s picked up too many times" % action_id)
                # We've run out of pickup attempts for this action...
                self.__update_action(
                    action_id,
                    status=3,
                    message="This action has been picked up multiple times "
                    "without a successful transaction; "
                    "this action is now failed for this system.")
                # Invalidate actions that depend on this one
                self._invalidate_child_actions(action_id)
                # keep looking for a good action to process...
                continue

            if server_locked and action['unlocked_only'] == 'Y':
                # This action is locked
                log_debug(
                    4, "server id %s locked for action id %s" % (self.server_id, action_id))
                continue
            try:
                if version == 1:
                    ret = self.__getV1(action)
                else:
                    ret = self.__getV2(action)
            except ShadowAction, e:
                # Action the client should not see
                # Make sure we re-execute the query, so we pick up whatever
                # extra actions were added
                should_execute = 1
                text = e.args[0]
                log_debug(4, "Shadow Action", text)
                self.__update_action(action['id'], 2, 0, text)
                continue
            except InvalidAction, e:
                # This is an invalid action
                # Update its status so it won't bother us again
                text = e.args[0]
                log_debug(4, "Invalid Action", text)
                self.__update_action(action['id'], 3, -99, text)
                continue
            except EmptyAction, e:
                # this means that we have some sort of internal error
                # which gets reported in the logs. We don't touch the
                # action because this should get fixed on our side.
                log_error("Can not process action data", action, e.args)
                ret = ""
                break
class Queue(rhnHandler):

    """ XMLRPC queue functions that we will provide for the outside world. """

    def __init__(self):
        """ Add a list of functions we are willing to server out. """
        rhnHandler.__init__(self)
        self.functions.append('get')
        self.functions.append('get_future_actions')
        self.functions.append('length')
        self.functions.append('submit')

        # XXX I am not proud of this. There should be a generic way to map
        # the client's error codes into success status codes
        # Maps action type -> {client result code -> None} for non-zero
        # result codes that still count as success.
        self.action_type_completed_codes = {
            'errata.update': {
                39: None,
            },
        }

    def __getV1(self, action):
        """ Fetches old queued actions for the client version 1.

            Returns {'id', 'version', 'action'} with 'action' an
            xmlrpclib-encoded blob; raises InvalidAction for methods the
            v1 client cannot handle.
        """
        log_debug(3, self.server_id)
        actionId = action['id']
        method = action["method"]
        if method == 'packages.update':
            xml = self.__packageUpdate(actionId)
        elif method == 'errata.update':
            xml = self.__errataUpdate(actionId)
        elif method == 'hardware.refresh_list':
            xml = xmlrpclib.dumps(("hardware",), methodname="client.refresh")
        elif method == 'packages.refresh_list':
            xml = xmlrpclib.dumps(("rpmlist",), methodname="client.refresh")
        else:
            # Unrecognized, skip
            raise InvalidAction("Action method %s unsupported by "
                                "Update Agent Client" % method)
        # all good
        return {'id': actionId, 'version': 1, 'action': xml}

    def __getV2(self, action, dry_run=0):
        """ Fetches queued actions for the clients version 2+.

            Dispatches to the registered server.action handler for the
            action's method and xmlrpclib-encodes its (tuple-wrapped)
            result. Raises EmptyAction if no handler can be resolved.
        """
        log_debug(3, self.server_id)
        # Get the root dir of this install
        try:
            method = getMethod.getMethod(action['method'],
                                         'server.action')
        except getMethod.GetMethodException:
            Traceback("queue.get V2")
            raise_with_tb(EmptyAction("Could not get a valid method for %s" % (
                action['method'],)), sys.exc_info()[2])

        # Call the method
        result = method(self.server_id, action['id'], dry_run)
        if result is None:
            # None are mapped to the empty list
            result = ()
        elif not isinstance(result, TupleType):
            # Everything other than a tuple is wrapped in a tuple
            result = (result, )
        xmlblob = xmlrpclib.dumps(result, methodname=action['method'])
        log_debug(5, "returning xmlblob for action", xmlblob)
        return {
            'id': action['id'],
            'action': xmlblob,
            'version': action['version'],
        }

    def __update_status(self, status):
        """ Update the runnng kernel and the last boot values for this server
            from the status dictionary passed on queue checkin.

            Record last running kernel and uptime.  Only update
            last_boot if it has changed by more than five seconds. We
            don't know the timezone the server is in. or even if its
            clock is right, but we do know it can properly track seconds
            since it rebooted, and use our own clocks to keep proper
            track of the actual time.
        """
        rhnSQL.set_log_auth_login('CLIENT')

        if 'uname' in status:
            kernelver = status['uname'][2]
            if kernelver != self.server.server["running_kernel"]:
                self.server.server["running_kernel"] = kernelver

        # XXX:We should be using Oracle's sysdate() for this management
        # In the case of multiple app servers in mutiple time zones all the
        # results are skewed.
        if 'uptime' in status:
            uptime = status['uptime']
            if isinstance(uptime, list) and len(uptime):
                # Toss the other values. For now
                uptime = uptime[0]
            try:
                uptime = float(uptime)
            except ValueError:
                # Wrong value passed by the client
                pass
            else:
                last_boot = time.time() - uptime
                if abs(last_boot - self.server.server["last_boot"]) > 5:
                    self.server.server["last_boot"] = last_boot
                    # A new boot time is evidence a reboot happened, so any
                    # picked-up reboot action can be closed out.
                    self.__set_reboot_action_to_success()

        # this is smart enough to do a NOOP if nothing changed.
        self.server.server.save()

    # Fixed typo in the method name ("succcess"); private (name-mangled)
    # method, only referenced inside this class.
    def __set_reboot_action_to_success(self):
        """ Mark any picked-up (status 1) reboot.reboot action for this
            server as Completed (status 2).
        """
        h = rhnSQL.prepare("""
            update rhnServerAction
               set status = 2
             where server_id = :server_id
               and action_id in (
                   select sa.action_id
                     from rhnServerAction sa
                     join rhnAction a on sa.action_id = a.id
                     join rhnActionType at on a.action_type = at.id
                    where sa.server_id = :server_id
                      and sa.status = 1
                      and at.label = 'reboot.reboot'
               )
        """)
        h.execute(server_id=self.server_id)

    def __should_snapshot(self):
        """ Return 1 if this server's entitlements allow taking a snapshot
            before an action runs, 0 otherwise.
        """
        log_debug(4, self.server_id, "determining whether to snapshot...")

        entitlements = self.server.check_entitlement()
        if "enterprise_entitled" not in entitlements:
            return 0

        # ok, take the snapshot before attempting this action
        return 1

    def _invalidate_child_actions(self, action_id):
        """ Fail every action depending (directly or transitively) on
            action_id, failing any kickstart sessions tied to them.
            Returns the list of invalidated action ids.
        """
        f_action_ids = rhnAction.invalidate_action(self.server_id, action_id)
        for f_action_id in f_action_ids:
            # Invalidate any kickstart session that depends on this action
            server_kickstart.update_kickstart_session(self.server_id,
                                                      f_action_id, action_status=3,
                                                      kickstart_state='failed',
                                                      next_action_type=None)
        return f_action_ids

    def _invalidate_failed_prereq_actions(self):
        """ Invalidate queued/picked-up actions whose prerequisite action
            has failed (and, via _invalidate_child_actions, everything
            that depends on them).
        """
        h = rhnSQL.prepare("""
            select sa.action_id, a.prerequisite
              from rhnServerAction sa, rhnAction a
             where sa.server_id = :server_id
               and sa.action_id = a.id
               and sa.status in (0, 1) -- Queued or picked up
               and a.prerequisite is not null
               and exists (
                   select 1 from rhnServerAction
                    where server_id = sa.server_id
                      and action_id = a.prerequisite
                      and status = 3 -- failed
               )
        """)
        h.execute(server_id=self.server_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            # (removed unused prereq_action_id unpacking)
            self._invalidate_child_actions(row['action_id'])

    # Per-org flag controlling whether clients may pre-fetch ("stage")
    # content for future actions.
    _query_future_enabled = rhnSQL.Statement("""
        select staging_content_enabled
          from rhnOrgConfiguration oc,
               rhnServer s
         where s.org_id = oc.org_id
           and s.id = :server_id
    """)

    def _future_actions_enabled(self):
        """ Returns true if staging content is enabled for this system """
        h = rhnSQL.prepare(self._query_future_enabled)
        h.execute(server_id=self.server_id)
        row = h.fetchone_dict()
        log_debug(4, row["staging_content_enabled"])
        return row["staging_content_enabled"] == "Y"

    # Actions scheduled up to :time_window hours ahead, restricted to the
    # package/errata action types that benefit from pre-staging.
    _query_queue_future = rhnSQL.Statement("""
        select sa.action_id id, a.version,
               sa.remaining_tries, at.label method,
               at.unlocked_only, a.prerequisite
          from rhnServerAction sa,
               rhnAction a,
               rhnActionType at
         where sa.server_id = :server_id
           and sa.action_id = a.id
           and a.action_type = at.id
           and sa.status in (0, 1) -- Queued or picked up
           and a.earliest_action <= current_timestamp + numtodsinterval(:time_window * 3600, 'second') -- Check earliest_action
           and at.label in ('packages.update', 'errata.update',
                            'packages.runTransaction', 'packages.fullUpdate')
         order by a.earliest_action, a.prerequisite nulls first, a.id
    """)

    def get_future_actions(self, system_id, time_window):
        """ return actions which are scheduled within next /time_window/ hours """
        self.auth_system(system_id)
        log_debug(3, "Checking for future actions within %d hours" % time_window)
        result = []
        # Skip pre-staging entirely while a reboot is still in flight.
        if self._future_actions_enabled() and not self.__reboot_in_progress():
            h = rhnSQL.prepare(self._query_queue_future)
            h.execute(server_id=self.server_id, time_window=time_window)
            action = h.fetchone_dict()
            while action:
                log_debug(5, action)
                # dry_run=1: encode the action without marking it picked up
                result.append(self.__getV2(action, dry_run=1))
                action = h.fetchone_dict()
        return result

    # Runnable actions: queued/picked-up, past earliest_action, and with no
    # incomplete prerequisite.
    _query_queue_get = rhnSQL.Statement("""
        select sa.action_id id, a.version,
               sa.remaining_tries, at.label method,
               at.unlocked_only, a.prerequisite
          from rhnServerAction sa,
               rhnAction a,
               rhnActionType at
         where sa.server_id = :server_id
           and sa.action_id = a.id
           and a.action_type = at.id
           and sa.status in (0, 1) -- Queued or picked up
           and a.earliest_action <= current_timestamp -- Check earliest_action
           and not exists (
               select 1
                 from rhnServerAction sap
                where sap.server_id = :server_id
                  and sap.action_id = a.prerequisite
                  and sap.status != 2 -- completed
           )
         order by a.earliest_action, a.prerequisite nulls first, a.id
    """)

    # Probably we need to figure out if we really need to split these two.
    def get(self, system_id, version=1, status=None):
        """ Return the next valid queued action for this system.

            Marks the returned action as Picked Up (decrementing its
            remaining tries) and commits. Returns "" when there is
            nothing to do (or a reboot is in progress); otherwise the
            {'id', 'version', 'action'} dict built by __getV1/__getV2.

            status, when supplied by the client, is the checkin status
            dict processed by __update_status. (Default changed from a
            mutable {} to None; behavior is identical since the value is
            only truth-tested.)
        """
        # Authenticate the system certificate
        if CFG.DISABLE_CHECKINS:
            self.update_checkin = 0
        else:
            self.update_checkin = 1
        self.auth_system(system_id)
        log_debug(1, self.server_id, version,
                  "checkins %s" % ["disabled", "enabled"][self.update_checkin])
        if status:
            self.__update_status(status)

        # Update the capabilities list
        rhnCapability.update_client_capabilities(self.server_id)

        # Invalidate failed actions
        self._invalidate_failed_prereq_actions()

        server_locked = self.server.server_locked()
        log_debug(3, "Server locked", server_locked)

        if self.__reboot_in_progress():
            # Don't hand out anything while the machine is rebooting.
            log_debug(3, "Server reboot in progress", self.server_id)
            rhnSQL.commit()
            return ""

        ret = {}
        # get the action. Status codes are currently:
        # 0 Queued
        # 1 Picked Up
        # 2 Completed
        # 3 Failed
        # XXX: we should really be using labels from rhnActionType instead of
        # hard coded type id numbers.
        # We fetch actions whose prerequisites have completed, and actions
        # that don't have prerequisites at all
        h = rhnSQL.prepare(self._query_queue_get)

        should_execute = 1

        # Loop to get a valid action
        # (only one valid action will be dealt with per execution of this function...)
        while 1:
            if should_execute:
                h.execute(server_id=self.server_id)
                should_execute = 0

            # Okay, got an action
            action = h.fetchone_dict()
            if not action:
                # No actions available; bail out
                # Don't forget the commit at the end...
                ret = ""
                break

            action_id = action['id']
            log_debug(4, "Checking action %s" % action_id)
            # okay, now we have the action - process it.
            if action['remaining_tries'] < 1:
                log_debug(4, "Action %s picked up too many times" % action_id)
                # We've run out of pickup attempts for this action...
                self.__update_action(action_id, status=3,
                                     message="This action has been picked up multiple times "
                                     "without a successful transaction; "
                                     "this action is now failed for this system.")
                # Invalidate actions that depend on this one
                self._invalidate_child_actions(action_id)
                # keep looking for a good action to process...
                continue

            if server_locked and action['unlocked_only'] == 'Y':
                # This action is locked
                log_debug(4, "server id %s locked for action id %s" % (
                    self.server_id, action_id))
                continue

            try:
                if version == 1:
                    ret = self.__getV1(action)
                else:
                    ret = self.__getV2(action)
            except ShadowAction:
                # Action the client should not see
                e = sys.exc_info()[1]
                # Make sure we re-execute the query, so we pick up whatever
                # extra actions were added
                should_execute = 1
                text = e.args[0]
                log_debug(4, "Shadow Action", text)
                self.__update_action(action['id'], 2, 0, text)
                continue
            except InvalidAction:
                # This is an invalid action
                e = sys.exc_info()[1]
                # Update its status so it won't bother us again
                text = e.args[0]
                log_debug(4, "Invalid Action", text)
                self.__update_action(action['id'], 3, -99, text)
                continue
            except EmptyAction:
                e = sys.exc_info()[1]
                # this means that we have some sort of internal error
                # which gets reported in the logs. We don't touch the
                # action because this should get fixed on our side.
                log_error("Can not process action data", action, e.args)
                ret = ""
                break
            else:
                # all fine
                # Update the status of the action
                h = rhnSQL.prepare("""
                    update rhnServerAction
                       set status = 1,
                           pickup_time = current_timestamp,
                           remaining_tries = :tries - 1
                     where action_id = :action_id
                       and server_id = :server_id
                """)
                h.execute(action_id=action["id"], server_id=self.server_id,
                          tries=action["remaining_tries"])
                break

        # commit all changes
        rhnSQL.commit()

        return ret

    def submit(self, system_id, action_id, result, message="", data=None):
        """ Submit the results of a queue run.
            Maps old and new rhn_check behavior to new database status codes

            The new API uses 4 slightly different status codes than the
            old client does.  This function will "hopefully" sensibly
            map them.  Old methodology:
               -rhn_check retrieves an action from the top of the action queue.
               -It attempts to execute the desired action and returns either
                   (a) 0             -- presumed successful.
                   (b) rhnFault object -- presumed failed
                   (c) some other non-fault object -- *assumed* successful.
               -Regardless of result code, action is marked as "executed"

            We try to make a smarter status selection (i.e. failed||completed).

            For reference:
            New DB status codes:      Old DB status codes:
                0: Queued                 0: queued
                1: Picked Up              1: picked up
                2: Completed              2: executed
                3: Failed                 3: completed
        """
        # Default changed from a mutable {} to None; normalize here so the
        # membership tests and str(data) below behave as before.
        if data is None:
            data = {}
        if type(action_id) is not IntType:
            # Convert it to int
            try:
                action_id = int(action_id)
            except ValueError:
                log_error("Invalid action_id", action_id)
                raise_with_tb(rhnFault(30, _("Invalid action value type %s (%s)") %
                                       (action_id, type(action_id))), sys.exc_info()[2])
        # Authenticate the system certificate
        self.auth_system(system_id)
        log_debug(1, self.server_id, action_id, result)
        # check that the action is valid
        # We have a uniqueness constraint on (action_id, server_id)
        h = rhnSQL.prepare("""
            select at.label action_type,
                   at.trigger_snapshot,
                   at.name
              from rhnServerAction sa,
                   rhnAction a,
                   rhnActionType at
             where sa.server_id = :server_id
               and sa.action_id = :action_id
               and sa.status = 1
               and a.id = :action_id
               and a.action_type = at.id
        """)
        h.execute(server_id=self.server_id, action_id=action_id)
        row = h.fetchone_dict()
        if not row:
            log_error("Server %s does not own action %s" % (
                self.server_id, action_id))
            raise rhnFault(22, _("Action %s does not belong to server %s") % (
                action_id, self.server_id))
        action_type = row['action_type']
        trigger_snapshot = (row['trigger_snapshot'] == 'Y')

        if 'missing_packages' in data:
            missing_packages = "Missing-Packages: %s" % str(
                data['missing_packages'])
            rmsg = "%s %s" % (message, missing_packages)
        elif 'koan' in data:
            rmsg = "%s: %s" % (message, data['koan'])
        else:
            rmsg = message

        rcode = result
        # Careful with this one, result can be a very complex thing
        # and this processing is required for compatibility with old
        # rhn_check clients
        if isinstance(rcode, dict):
            if "faultCode" in result:
                rcode = result["faultCode"]
            if "faultString" in result:
                rmsg = result["faultString"] + str(data)
        if isinstance(rcode, (dict, tuple, list)) \
                or type(rcode) is not IntType:
            # Not an int result code: stuff its repr into the message and
            # mark the result as failed (-1).
            rmsg = u"%s [%s]" % (UnicodeType(message), UnicodeType(rcode))
            rcode = -1
        # map to db codes.
        status = self.status_for_action_type_code(action_type, rcode)

        if status == 3:
            # Failed action - invalidate children
            self._invalidate_child_actions(action_id)
        elif action_type == 'reboot.reboot':
            # reboot action should stay as pickup
            rhnSQL.commit()
            return 0
        elif status == 2 and trigger_snapshot and self.__should_snapshot():
            # if action status is 'Completed', snapshot if allowed and if needed
            self.server.take_snapshot("Scheduled action completion: %s" % row['name'])

        self.__update_action(action_id, status, rcode, rmsg)

        # Store the status in a flag - easier than to complicate the action
        # plugin API by adding a status
        rhnFlags.set('action_id', action_id)
        rhnFlags.set('action_status', status)

        self.process_extra_data(self.server_id, action_id, data=data,
                                action_type=action_type)

        # commit, because nobody else will
        rhnSQL.commit()
        return 0

    def status_for_action_type_code(self, action_type, rcode):
        """ Convert whatever the client sends as a result code into a status in the
            database format
            This is more complicated, since some of the client's result codes have
            to be marked as successes.
        """
        log_debug(4, action_type, rcode)
        if rcode == 0:
            # Completed
            return 2

        if action_type not in self.action_type_completed_codes:
            # Failed
            return 3

        # (renamed local from 'hash', which shadowed the builtin)
        completed_codes = self.action_type_completed_codes[action_type]
        if rcode not in completed_codes:
            # Failed
            return 3

        # Completed
        return 2

    def process_extra_data(self, server_id, action_id, data=None,
                           action_type=None):
        """ Hand the client-supplied result payload to the registered
            server.action_extra_data handler for this action type.
        """
        log_debug(4, server_id, action_id, action_type)

        # Default changed from a mutable {} to None; normalize for the
        # handler call below.
        if data is None:
            data = {}

        if not action_type:
            # Shouldn't happen
            return

        try:
            method = getMethod.getMethod(action_type,
                                         'server.action_extra_data')
        except getMethod.GetMethodException:
            Traceback("queue.get V2")
            raise_with_tb(EmptyAction("Could not get a valid method for %s" %
                                      action_type), sys.exc_info()[2])
        # Call the method
        result = method(self.server_id, action_id, data=data)
        return result

    def length(self, system_id):
        """ Return the queue length for a certain server. """
        # Authenticate the system certificate
        self.auth_system(system_id)
        log_debug(1, self.server_id)
        h = rhnSQL.prepare("""
            select count(action_id) id
              from rhnServerAction r
             where r.server_id = :server_id
               and r.status in (0, 1)
        """)
        h.execute(server_id=self.server_id)
        data = h.fetchone_dict()
        if data is None:
            return 0
        return data["id"]

    # PRIVATE methods

    def __reboot_in_progress(self):
        """ check for a reboot action for this server in status Picked Up """
        log_debug(4, self.server_id)
        h = rhnSQL.prepare("""
            select 1
              from rhnServerAction sa
              join rhnAction a on sa.action_id = a.id
              join rhnActionType at on a.action_type = at.id
             where sa.server_id = :server_id
               and at.label = 'reboot.reboot'
               and sa.status = 1 -- Picked Up
        """)
        h.execute(server_id=self.server_id)
        # Any row back means a reboot action is currently picked up.
        return h.fetchone_dict() is not None

    def __update_action(self, action_id, status,
                        resultCode=None, message=""):
        """ Update the status of an action. """
        log_debug(4, action_id, status, resultCode, message)
        rhnAction.update_server_action(server_id=self.server_id,
                                       action_id=action_id,
                                       status=status,
                                       result_code=resultCode,
                                       result_message=message)
        return 0

    def __errataUpdate(self, actionId):
        """ Old client errata retrieval. """
        log_debug(3, self.server_id, actionId)
        # get the names of the packages associated with each errata and
        # look them up in channels subscribed to by the server and select
        # the latest version
        sql = """
            select pn.name name,
                   pl.evr.version version,
                   pl.evr.release release
              from (
                    select p.name_id, max(pe.evr) evr
                      from rhnPackageEVR pe,
                           rhnChannelPackage cp,
                           rhnPackage p,
                           rhnServerChannel sc,
                           (
                            select p_name.name_id id
                              from rhnActionErrataUpdate aeu,
                                   rhnErrataPackage ep,
                                   rhnPackage p_name
                             where aeu.action_id = :action_id
                               and aeu.errata_id = ep.errata_id
                               and ep.package_id = p_name.id
                           ) nids
                     where nids.id = p.name_id
                       and p.evr_id = pe.id
                       and p.id = cp.package_id
                       and cp.channel_id = sc.channel_id
                       and sc.server_id = :server_id
                     group by p.name_id
                   ) pl,
                   rhnPackageName pn
             where pn.id = pl.name_id
        """
        h = rhnSQL.prepare(sql)
        h.execute(action_id=actionId, server_id=self.server_id)

        packages = []
        while 1:
            ret = h.fetchone_dict()
            if not ret:
                break
            # older clients have issues with real epochs, se they are
            # kind of irrelevant
            packages.append([ret["name"], ret["version"], ret["release"], ''])
        xml = xmlrpclib.dumps((packages,),
                              methodname='client.update_packages')
        return xml

    def __packageUpdate(self, actionId):
        """ Old client package retrieval. """
        log_debug(3, self.server_id, actionId)

        # The SQL query is a union of:
        # - packages with a specific EVR
        # - the latest packages (no EVR specified)
        # XXX Should we want to schedule the install for a specific version,
        # we'll have to modify this
        statement = """
            select distinct
                   pkglist.name name,
                   -- decode the evr object selected earlier
                   pkglist.evr.version version,
                   pkglist.evr.release release
              from (
                    -- get the max of the two possible cases
                    select pl.name name, max(pl.evr) evr
                      from (
                            -- if the EVR is specifically requested...
                            select pn.name name, pe.evr evr
                              from rhnActionPackage ap,
                                   rhnPackage p,
                                   rhnPackageName pn,
                                   rhnPackageEVR pe,
                                   rhnServerChannel sc,
                                   rhnChannelPackage cp
                             where ap.action_id = :action_id
                               and ap.evr_id is NOT NULL
                               and ap.evr_id = p.evr_id
                               and ap.evr_id = pe.id
                               and ap.name_id = p.name_id
                               and ap.name_id = pn.id
                               and p.id = cp.package_id
                               and cp.channel_id = sc.channel_id
                               and sc.server_id = :server_id
                            UNION
                            -- when no EVR requested, we need to compute the max available
                            -- from the channels the server is subscribed to
                            select pn.name name, max(pevr.evr) evr
                              from rhnActionPackage ap,
                                   rhnServerChannel sc,
                                   rhnChannelPackage cp,
                                   rhnPackage p,
                                   rhnPackageEVR pevr,
                                   rhnPackageName pn
                             where ap.action_id = :action_id
                               and ap.evr_id is null
                               and ap.name_id = pn.id
                               and ap.name_id = p.name_id
                               and p.evr_id = pevr.id
                               and sc.server_id = :server_id
                               and sc.channel_id = cp.channel_id
                               and cp.package_id = p.id
                             group by pn.name
                           ) pl
                     group by pl.name
                   ) pkglist
        """
        h = rhnSQL.prepare(statement)
        h.execute(action_id=actionId, server_id=self.server_id)
        ret = h.fetchall_dict() or []
        packages = []
        for p in ret:
            # old clients have issues dealing with real epochs, so we
            # kind of fake it for now in here
            entry = [p['name'], p['version'], p['release'], '']
            packages.append(entry)
        xml = xmlrpclib.dumps((packages,),
                              methodname='client.update_packages')
        return xml
"Cannot remove non-empty channel %s" % config_channel, explain=0), None, sys.exc_info()[2] raise log_debug(5, "Removed:", config_channel) rhnSQL.commit() return "" _query_management_list_files = rhnSQL.Statement(""" select cc.label config_channel, cfn.path from rhnConfigFileName cfn, rhnConfigFileState cfs, rhnConfigFile cf, rhnConfigChannel cc where cc.org_id = :org_id and cc.label = :config_channel and cc.id = cf.config_channel_id and cf.state_id = cfs.id and cfs.label = 'alive' and cf.config_file_name_id = cfn.id """) def management_list_files(self, dict): log_debug(1) self._get_and_validate_session(dict) config_channel = dict.get('config_channel') # XXX Validate the config channel log_debug(3, "Org id", self.org_id, "Config channel", config_channel)