def _checkCertMatch_rhnCryptoKey(caCert, description, org_id, deleteRowYN=0, verbosity=0):
    """Check whether a CA SSL certificate is already in the database.

    Returns:
        -1    no matching cert row in the DB (or the row was just deleted)
        None  the cert in the DB is identical to the one on disk (nothing to do)
        0..N  id of the existing (different) cert row, when deleteRowYN is false

    If a differing row is found and deleteRowYN is true, the row is deleted
    and -1 is returned.

    Used ONLY by: store_rhnCryptoKey(...)
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `open(...).read()` leaked the handle).
    with open(caCert, "rb") as f:
        cert = f.read().strip()
    h = rhnSQL.prepare(_querySelectCryptoCertInfo)
    h.execute(description=description, org_id=org_id)
    row = h.fetchone_dict()
    rhn_cryptokey_id = -1
    if row:
        if cert == rhnSQL.read_lob(row["key"]):
            # match found, nothing to do
            if verbosity:
                # print() call form works on both Python 2 and 3
                print("Nothing to do: certificate to be pushed matches certificate in database.")
            return
        # there can only be one (bugzilla: 120297)
        rhn_cryptokey_id = int(row["id"])
        ## NUKE IT!
        if deleteRowYN:
            h = rhnSQL.prepare("delete from rhnCryptoKey where id=:rhn_cryptokey_id")
            h.execute(rhn_cryptokey_id=rhn_cryptokey_id)
            # rhnSQL.commit()
            rhn_cryptokey_id = -1
    return rhn_cryptokey_id
def test_lobs(self):
    """Round-trip a 256-byte payload through a BLOB column and read it back."""
    new_id = rhnSQL.Sequence('misatestlob_id_seq').next()
    cursor = rhnSQL.prepare("""
        insert into misatestlob (id, val) values (:id, empty_blob())
    """)
    cursor.execute(id=new_id)
    # Lock the freshly inserted row so the LOB locator is writable.
    cursor = rhnSQL.prepare("""
        select val from misatestlob where id = :id for update of val
    """)
    cursor.execute(id=new_id)
    row = cursor.fetchone_dict()
    self.assertNotEqual(row, None)
    # Every byte value 0..255 exercises the full binary range.
    payload = "".join([chr(i) for i in range(256)])
    row['val'].write(payload)
    rhnSQL.commit()
    cursor = rhnSQL.prepare("""
        select val from misatestlob where id = :id
    """)
    cursor.execute(id=new_id)
    row = cursor.fetchone_dict()
    self.assertNotEqual(row, None)
    self.assertEqual(rhnSQL.read_lob(row['val']), payload)
def server_used_token(server_id, token_id):
    """Record that `server_id` consumed activation token `token_id` (insert once)."""
    lookup = rhnSQL.prepare(_query_check_server_uses_token)
    lookup.execute(server_id=server_id, token_id=token_id)
    if not lookup.fetchone_dict():
        # No usage row yet; create it.
        insert = rhnSQL.prepare(_query_server_token_used)
        insert.execute(server_id=server_id, token_id=token_id)
def _process_testresult(tr, server_id, action_id, benchmark, profile, errors):
    """Insert a SCAP test-result row plus its rule results.

    When _process_ruleresults reports truncation, the errors blob is
    rewritten with an explanatory notice appended.
    """
    start_time = None
    if tr.hasAttribute("start-time"):
        start_time = tr.getAttribute("start-time")
    h = rhnSQL.prepare(_query_insert_tresult, blob_map={"errors": "errors"})
    h.execute(
        server_id=server_id,
        action_id=action_id,
        bench_id=_truncate(benchmark.getAttribute("id"), 120),
        bench_version=_truncate(benchmark.getAttribute("version"), 80),
        profile_id=profile.getAttribute("id"),
        profile_title=_truncate(profile.getAttribute("title"), 120),
        identifier=_truncate(tr.getAttribute("id"), 120),
        # Bug fix: the original called start_time.replace() unconditionally,
        # raising AttributeError when the "start-time" attribute was absent.
        start_time=start_time.replace("T", " ") if start_time else None,
        end_time=tr.getAttribute("end-time").replace("T", " "),
        errors=errors,
    )
    h = rhnSQL.prepare(_query_get_tresult)
    h.execute(server_id=server_id, action_id=action_id)
    testresult_id = h.fetchone()[0]
    if not _process_ruleresults(testresult_id, tr):
        h = rhnSQL.prepare(_query_update_errors, blob_map={"errors": "errors"})
        h.execute(
            testresult_id=testresult_id,
            errors=errors + "\nSome text strings were truncated when saving to the database.",
        )
def _register_dispatcher(self, jabber_id, hostname, port):
    """Upsert this dispatcher's registration row: update first, insert if absent."""
    updated = rhnSQL.prepare(self._query_update_register_dispatcher).execute(
        jabber_id_in=jabber_id, hostname_in=hostname,
        port_in=port, password_in=self._password)
    if not updated:
        # No existing row was touched; register a new dispatcher entry.
        rhnSQL.prepare(self._query_insert_register_dispatcher).execute(
            jabber_id_in=jabber_id, hostname_in=hostname,
            port_in=port, password_in=self._password)
    rhnSQL.commit()
def schedule_server_action(server_id, action_type, action_name=None, delta_time=0,
                           scheduler=None, org_id=None, prerequisite=None):
    """Schedule an action for one server and queue it in rhnServerAction.

    Raises ValueError when server_id does not exist; returns the action id.
    """
    if not org_id:
        # Derive the org from the server row itself.
        lookup = rhnSQL.prepare("select org_id from rhnServer where id = :id")
        lookup.execute(id=server_id)
        server_row = lookup.fetchone_dict()
        if not server_row:
            raise ValueError("Invalid server id %s" % server_id)
        org_id = server_row['org_id']
    action_id = schedule_action(action_type, action_name,
                                delta_time=delta_time, scheduler=scheduler,
                                org_id=org_id, prerequisite=prerequisite)
    # Insert the action for this server as Queued (status 0).
    queue = rhnSQL.prepare("""
        insert into rhnServerAction (server_id, action_id, status)
        values (:server_id, :action_id, 0)
    """)
    queue.execute(server_id=server_id, action_id=action_id)
    return action_id
def test_executemany(self):
    """
    Tests the case of passing an integer as a value into a VARCHAR2
    column (executemany makes it more interesting because the driver
    generally verifies the param types; passing a string and an Int
    takes it one step further)
    """
    h = rhnSQL.prepare(
        """
        insert into %s (id, val) values (:id, :val)
    """
        % self.table_name
    )
    params = {"id": [1, 2], "val": ["", 3]}
    # apply() was removed in Python 3; keyword expansion is the
    # direct equivalent of apply(h.executemany, (), params).
    h.executemany(**params)
    h = rhnSQL.prepare("select id, val from %s" % self.table_name)
    h.execute()
    rows = h.fetchall_dict()
    self.assertEqual(len(rows), 2)
    v_id, v_val = rows[0]["id"], rows[0]["val"]
    self.assertEqual(v_id, 1)
    # Empty string comes back as NULL on Oracle.
    self.assertEqual(v_val, None)
    v_id, v_val = rows[1]["id"], rows[1]["val"]
    self.assertEqual(v_id, 2)
    # The integer 3 is stored/returned as the string "3".
    self.assertEqual(v_val, "3")
def update_uuid(self, uuid, commit=1):
    """Store the client's UUID for this server, trimmed to column width.

    Tries an update first and falls back to insert when no row exists.
    """
    log_debug(3, uuid)
    # XXX Should determine the column length dynamically
    uuid_col_length = 36
    if uuid is not None:
        uuid = str(uuid)
    if not uuid:
        log_debug('Nothing to do')
        return
    uuid = uuid[:uuid_col_length]
    server_id = self.server['id']
    log_debug(4, "Trimmed uuid", uuid, server_id)
    # Update this server's UUID (unique client identifier)
    statement = rhnSQL.prepare(self._query_update_uuid)
    updated = statement.execute(server_id=server_id, uuid=uuid)
    log_debug(4, "execute returned", updated)
    if updated != 1:
        # Row does not exist yet; create it.
        statement = rhnSQL.prepare(self._query_insert_uuid)
        statement.execute(server_id=server_id, uuid=uuid)
    if commit:
        rhnSQL.commit()
def _delete_rpm_group(packageIds):
    """Delete the given package ids plus every table row referencing them."""
    references = [
        'rhnChannelPackage',
        'rhnErrataPackage',
        'rhnErrataPackageTMP',
        'rhnPackageChangelogRec',
        'rhnPackageConflicts',
        'rhnPackageFile',
        'rhnPackageObsoletes',
        'rhnPackageProvides',
        'rhnPackageRequires',
        'rhnPackageRecommends',
        'rhnPackageSuggests',
        'rhnPackageSupplements',
        'rhnPackageEnhances',
        'rhnPackageBreaks',
        'rhnPackagePredepends',
        'rhnServerNeededCache',
    ]
    # Purge the child tables first so FK constraints are satisfied.
    for table in references:
        cursor = rhnSQL.prepare("delete from %s where package_id = :package_id" % table)
        deleted = cursor.executemany(package_id=packageIds)
        log_debug(3, "Deleted from %s: %d rows" % (table, deleted))
    cursor = rhnSQL.prepare("delete from rhnPackage where id = :package_id")
    deleted = cursor.executemany(package_id=packageIds)
    if deleted:
        log_debug(2, "DELETED package id %s" % str(packageIds))
    else:
        log_error("No such package id %s" % str(packageIds))
    rhnSQL.commit()
def _process_testresult(tr, server_id, action_id, benchmark, profile, errors):
    """Insert a SCAP test-result row plus its rule results.

    When _process_ruleresults reports truncation, the errors blob is
    rewritten with an explanatory notice appended.
    """
    start_time = None
    if tr.hasAttribute('start-time'):
        start_time = tr.getAttribute('start-time')
    h = rhnSQL.prepare(_query_insert_tresult, blob_map={'errors': 'errors'})
    h.execute(server_id=server_id,
              action_id=action_id,
              bench_id=_truncate(benchmark.getAttribute('id'), 120),
              bench_version=_truncate(benchmark.getAttribute('version'), 80),
              profile_id=profile.getAttribute('id'),
              profile_title=_truncate(profile.getAttribute('title'), 120),
              identifier=_truncate(tr.getAttribute('id'), 120),
              # Bug fix: the original called start_time.replace() even when the
              # 'start-time' attribute was absent, raising AttributeError.
              start_time=start_time.replace('T', ' ') if start_time else None,
              end_time=tr.getAttribute('end-time').replace('T', ' '),
              errors=errors
              )
    h = rhnSQL.prepare(_query_get_tresult)
    h.execute(server_id=server_id, action_id=action_id)
    testresult_id = h.fetchone()[0]
    if not _process_ruleresults(testresult_id, tr):
        h = rhnSQL.prepare(_query_update_errors, blob_map={'errors': 'errors'})
        h.execute(testresult_id=testresult_id,
                  errors=errors + '\nSome text strings were truncated when saving to the database.')
def client_set_namespaces(self, systemid, namespaces):
    """Replace the server's config channel subscriptions with `namespaces`,
    preserving their order as the channel position."""
    self.auth_system(systemid)
    server_id = self.server.getid()
    org_id = self.server.server['org_id']
    wipe = rhnSQL.prepare("""
        delete from rhnServerConfigChannel where server_id = :server_id
    """)
    wipe.execute(server_id=server_id)
    subscribe = rhnSQL.prepare("""
        insert into rhnServerConfigChannel (server_id, config_channel_id, position)
        select :server_id, id, :position
          from rhnConfigChannel
         where name = :config_channel
           and org_id = :org_id
    """)
    # enumerate() replaces the manual position counter of the original.
    for position, config_channel in enumerate(namespaces):
        rowcount = subscribe.execute(server_id=server_id, position=position,
                                     config_channel=config_channel, org_id=org_id)
        if not rowcount:
            raise rhnFault(4009, "Unable to find config channel %s" % config_channel,
                           explain=0)
    rhnSQL.commit()
    return 0
def populate_rhn_dist_channel_map(channel_id, channel_arch_id, org_id, release):
    """Insert a rhnDistChannelMap row for the channel unless one already exists."""
    if not release:
        release = 'unit test'
    check = rhnSQL.prepare("""
        SELECT 1 FROM rhnDistChannelMap
         WHERE release = :release
           AND channel_arch_id = :channel_arch_id
           AND org_id = :org_id
    """)
    check.execute(release=release, channel_arch_id=channel_arch_id, org_id=org_id)
    if check.fetchone_dict():
        # Mapping already present; nothing to do.
        return
    create = rhnSQL.prepare("""
        INSERT INTO rhnDistChannelMap
            (os, release, channel_arch_id, channel_id, org_id)
        VALUES (:os, :release, :channel_arch_id, :channel_id, :org_id)
    """)
    create.execute(os="TestOS", release=release,
                   channel_arch_id=channel_arch_id,
                   channel_id=channel_id, org_id=org_id)
    rhnSQL.commit()
def _update_package_data(self, crash_id, pkg_data):
    """Update the package columns of a crash row.

    Returns the affected row count, or 0 when the package data is incomplete.
    """
    log_debug(1, "_update_package_data: %s, %s" % (crash_id, pkg_data))
    # Older versions of abrt used to store the package info in a single 'package' file
    if pkg_data and 'package' in pkg_data:
        (name, epoch, version, release) = parseRPMName(pkg_data['package'])
        if not all((name, epoch, version, release)):
            return 0
        cursor = rhnSQL.prepare(_query_update_pkg_data1)
        rowcount = cursor.execute(
            crash_id=crash_id,
            pkg_name=name,
            pkg_epoch=epoch,
            pkg_version=version,
            pkg_release=release)
        rhnSQL.commit()
        return rowcount
    # Newer abrt sends the fields individually; all of them must be present.
    for item in ['pkg_name', 'pkg_epoch', 'pkg_version', 'pkg_release', 'pkg_arch']:
        if not (item in pkg_data and pkg_data[item]):
            return 0
    cursor = rhnSQL.prepare(_query_update_pkg_data2)
    rowcount = cursor.execute(
        crash_id=crash_id,
        pkg_name=pkg_data['pkg_name'],
        pkg_epoch=pkg_data['pkg_epoch'],
        pkg_version=pkg_data['pkg_version'],
        pkg_release=pkg_data['pkg_release'],
        pkg_arch=pkg_data['pkg_arch'])
    rhnSQL.commit()
    return rowcount
def grant_channel_family_entitlements(org_id, channel_family, quantity):
    """ Check to see if org has a channelfamily associated with it.
        If not, create one.

    NOTE(review): `quantity` is unused here; kept for interface compatibility.
    """
    # Bind variable instead of '%s' interpolation: the original built the
    # lookup by string substitution, which is SQL-injection-prone and
    # defeats statement caching.
    _lookup_chfam = """
       SELECT 1 from rhnChannelFamily
        WHERE label = :label
    """
    h = rhnSQL.prepare(_lookup_chfam)
    h.execute(label=channel_family)
    # some extra check for upgrades; fetch a row instead of relying on the
    # return value of execute(), which is not a result row for a SELECT.
    if h.fetchone_dict():
        # Already exists, move on
        return
    _query_create_chfam = """
       INSERT INTO rhnChannelFamily
              (id, name, label, org_id, product_url)
       VALUES (sequence_nextval('rhn_channel_family_id_seq'), :name, :label, :org, :url)
    """
    h = rhnSQL.prepare(_query_create_chfam)
    try:
        h.execute(
            name='Private Channel Family %s' % channel_family,
            label=channel_family,
            org=org_id,
            url='%s url' % channel_family)
    except rhnSQL.SQLError:
        # Python 3 compatible except (was: `except rhnSQL.SQLError, e`);
        # if we're here that means we're violating something
        raise
def find_or_create_channel_arch(name, label):
    """Return the id of the rhnChannelArch row for (name, label), creating it
    (and its arch type) when missing."""
    # Bind variables instead of '%s' interpolation (SQL-injection-prone
    # and defeats statement caching).
    lookup = """
       SELECT id from rhnChannelArch
        WHERE label = :label
          AND name = :name
    """
    h = rhnSQL.prepare(lookup)
    h.execute(label=label, name=name)
    row = h.fetchone_dict()
    if row:
        return row['id']
    query_create = """
        INSERT INTO rhnChannelArch
               (id, arch_type_id, label, name)
        VALUES (sequence_nextval('rhn_channel_arch_id_seq'), :arch_type_id, :label, :name)
    """
    arch_type_id = find_or_create_arch_type(name=name, label=label)
    h = rhnSQL.prepare(query_create)
    try:
        h.execute(arch_type_id=arch_type_id, label=label, name=name)
        rhnSQL.commit()
    except rhnSQL.SQLError:
        # Python 3 compatible except (was: `except rhnSQL.SQLError, e`);
        # if we're here that means we're violating something
        raise
def __create_server_group(group_label, org_id, maxnum=''):
    """Return the id of the org's server group of type `group_label`,
    creating it from the group-type template when it does not exist."""
    lookup = rhnSQL.prepare("""
        select sg.id, sg.current_members
          from rhnServerGroup sg
         where sg.group_type = (
                   select id from rhnServerGroupType
                    where label = :group_label)
           and sg.org_id = :org_id
    """)
    lookup.execute(org_id=org_id, group_label=group_label)
    existing = lookup.fetchone_dict()
    if existing:
        # Group already there; hand back its id.
        return existing["id"]
    # Create the requested group.
    new_id = rhnSQL.Sequence("rhn_server_group_id_seq")()
    create = rhnSQL.prepare("""
        insert into rhnServerGroup (
               id, name, description, max_members, group_type, org_id)
        select :new_id, sgt.name, sgt.name, :maxnum, sgt.id, :org_id
          from rhnServerGroupType sgt
         where sgt.label = :group_label
    """)
    inserted = create.execute(new_id=new_id, org_id=org_id,
                              group_label=group_label, maxnum=str(maxnum))
    if inserted == 0:
        # No rows were created, probably invalid label
        raise rhnException("Could not create new group for org=`%s'" % org_id,
                           group_label)
    return new_id
def update_push_client_jid(server_id, jid):
    """Point the push client row of `server_id` at `jid`, clearing duplicates first."""
    rhnSQL.prepare(_query_delete_duplicate_client_jids).execute(
        server_id=server_id, jid=jid)
    rhnSQL.prepare(_query_update_push_client_jid).execute(
        server_id=server_id, jid=jid)
    rhnSQL.commit()
    return jid
def find_or_create_arch_type(name, label):
    """Return the id of the rhnArchType row for (name, label), creating it
    when missing (the fresh id is returned via a second lookup)."""
    # Bind variables instead of '%s' interpolation (SQL-injection-prone
    # and defeats statement caching).
    lookup = """
       SELECT id from rhnArchType
        WHERE label = :label
          AND name = :name
    """
    h = rhnSQL.prepare(lookup)
    h.execute(label=label, name=name)
    row = h.fetchone_dict()
    if row:
        return row['id']
    query_create = """
        INSERT INTO rhnArchType (id, label, name)
        VALUES (sequence_nextval('rhn_archtype_id_seq'), :label, :name)
    """
    h = rhnSQL.prepare(query_create)
    try:
        h.execute(label=label, name=name)
        rhnSQL.commit()
    except rhnSQL.SQLError:
        # (dropped the unused `e = sys.exc_info()[1]` binding)
        # if we're here that means we're violating something
        raise
    # Re-run the lookup so the caller gets the freshly created id.
    return find_or_create_arch_type(name, label)
def __db_insert_domain(self, host_id, uuid, properties):
    """
    Register a new guest domain: one row in rhnVirtualInstance plus the
    matching rhnVirtualInstanceInfo detail row.
    """
    # Allocate the virtual-instance id up front.
    query = rhnSQL.prepare(
        "SELECT sequence_nextval('rhn_vi_id_seq') as id FROM dual")
    query.execute()
    row = query.fetchone_dict() or {}
    if not row or 'id' not in row:
        raise VirtualizationEventError('unable to get virt instance id')
    instance_id = row['id']

    query = rhnSQL.prepare("""
        INSERT INTO rhnVirtualInstance
            (id, host_system_id, virtual_system_id, uuid, confirmed)
        VALUES (:id, :host_id, null, :uuid, 1)
    """)
    query.execute(id=instance_id, host_id=host_id, uuid=uuid)

    # Detail row: type and state ids are resolved from their label tables.
    query = rhnSQL.prepare("""
        INSERT INTO rhnVirtualInstanceInfo
            (instance_id, name, vcpus, memory_size_k, instance_type, state)
        SELECT :id, :name, :vcpus, :memory, rvit.id, rvis.id
          FROM rhnVirtualInstanceType rvit, rhnVirtualInstanceState rvis
         WHERE rvit.label = :virt_type and rvis.label = :state
    """)
    query.execute(id=instance_id,
                  name=properties[PropertyType.NAME],
                  vcpus=properties[PropertyType.VCPUS],
                  memory=properties[PropertyType.MEMORY],
                  virt_type=properties[PropertyType.TYPE],
                  state=properties[PropertyType.STATE])
def main():
    """Import each module named in `args` and try to prepare every
    rhnSQL.Statement it defines, reporting statements that fail to prepare."""
    rhnSQL.initDB()
    if not args:
        print("No module specified")
        return 0
    if '.' not in sys.path:
        sys.path.append('.')
    g = globals()
    for module_name in args:
        print("Checking module %s" % module_name)
        pmn = proper_module_name(module_name)
        try:
            module = __import__(pmn)
        except ImportError:
            e = sys.exc_info()[1]
            print("Unable to import module %s: %s" % (module_name, e))
            continue
        g[module_name] = module
        # __import__ returns the top-level package; walk down to the leaf.
        for component in pmn.split('.')[1:]:
            module = getattr(module, component)
        for mod, name, statement in get_class_instances(module, rhnSQL.Statement):
            try:
                rhnSQL.prepare(statement)
            except rhnSQL.SQLStatementPrepareError:
                e = sys.exc_info()[1]
                print("Error: %s.%s: %s" % (mod.__name__, name, e))
def test_execute_rowcount(self):
    """Tests row counts"""
    table_name = "misatest"
    try:
        # Create the scratch table on first use; otherwise empty it.
        if table_name not in self._list_tables():
            rhnSQL.execute("create table %s (id int, value int)" % table_name)
        else:
            rhnSQL.execute("delete from %s" % table_name)
        insert = rhnSQL.prepare(rhnSQL.Statement(
            "insert into %s values (:item_id, :value)" % table_name))
        self.assertEqual(insert.execute(item_id=1, value=2), 1)
        self.assertEqual(insert.execute(item_id=2, value=2), 1)
        delete = rhnSQL.prepare(rhnSQL.Statement("delete from %s" % table_name))
        # Both inserted rows should go in one statement.
        self.assertEqual(delete.execute(), 2)
        rhnSQL.commit()
    finally:
        rhnSQL.execute("drop table %s" % table_name)
def verify_family_permissions(orgid=1):
    """ Verify channel family permissions for first org """
    lookup = rhnSQL.prepare("""
        SELECT CF.id
          FROM rhnChannelFamily CF
         WHERE CF.org_id = :orgid
           AND NOT EXISTS (
                   SELECT 1
                     FROM rhnPrivateChannelFamily PCF
                    WHERE PCF.org_id = CF.org_id
                      AND PCF.channel_family_id = CF.id)
         ORDER BY CF.id
    """)
    lookup.execute(orgid=orgid)
    family = lookup.fetchone_dict()
    if not family:
        # Every family already has a private entry.
        return
    grant = rhnSQL.prepare("""
        INSERT INTO rhnPrivateChannelFamily
            (channel_family_id, org_id, max_members, current_members)
        VALUES (:id, :org_id, NULL, 0)
    """)
    grant.execute(id=family['id'], org_id=orgid)
def load_suse_products(self):
    """Populate self.suse_products from suseServer and the installed-product tables."""
    log_debug(1, "load suse_products")
    if not self.server['id']:
        return
    lookup = rhnSQL.prepare("""
        SELECT s.guid, s.secret, ost.target ostarget
          FROM suseServer s
          LEFT JOIN suseOsTarget ost ON s.ostarget_id = ost.id
         WHERE s.rhn_server_id = :server_id
    """)
    lookup.execute(server_id=self.server['id'])
    self.suse_products = lookup.fetchone_dict() or {}
    if not self.suse_products:
        return
    # Server row found: attach the list of installed products.
    products = rhnSQL.prepare("""
        SELECT sip.name, sip.version, sip.release,
               rpa.label arch, sip.is_baseproduct baseproduct
          FROM suseInstalledProduct sip
          JOIN rhnPackageArch rpa ON sip.arch_type_id = rpa.id
          JOIN suseServerInstalledProduct ssip
               ON sip.id = ssip.suse_installed_product_id
         WHERE ssip.rhn_server_id = :server_id
    """)
    products.execute(server_id=self.server['id'])
    self.suse_products['products'] = products.fetchall_dict() or []
def delete_guests(server_id):
    """
    Callback used after a successful kickstart to remove any guest virtual
    instances, as well as their associated servers.
    """
    # Delete every guest server object first.
    guests = rhnSQL.prepare(_query_lookup_guests_for_host)
    guests.execute(server_id=server_id)
    delete_server = rhnSQL.Procedure("delete_server")
    log_debug(4, "Deleting guests")
    while True:
        row = guests.fetchone_dict()
        if not row:
            break
        guest_id = row['virtual_system_id']
        log_debug(4, 'Deleting guest server: %s' % guest_id)
        try:
            if guest_id is not None:
                delete_server(guest_id)
        except rhnSQL.SQLError:
            # Best-effort: log and continue with the remaining guests.
            log_error("Error deleting server: %s" % guest_id)
    # Then drop all virtual instances for this host.
    log_debug(4, "Deleting all virtual instances for host")
    cleanup = rhnSQL.prepare(_query_delete_virtual_instances)
    cleanup.execute(server_id=server_id)
    # Commit all changes, rolling back on failure.
    try:
        rhnSQL.commit()
    except rhnSQL.SQLError:
        e = sys.exc_info()[1]
        log_error("Error committing transaction: %s" % e)
        rhnSQL.rollback()
def _push_config_file(self, file):
    """Ensure a rhnConfigFile row exists for `file`.

    Side effects: sets file['config_info_id'] and file['config_file_id'].
    """
    config_info_query = self._query_lookup_non_symlink_config_info
    if self._is_link(file) and file.get("symlink"):
        config_info_query = self._query_lookup_symlink_config_info
    # Resolve the config info id first.
    h = rhnSQL.prepare(config_info_query)
    h.execute(**file)
    row = h.fetchone_dict()
    if not row:
        # Hmm
        raise rhnException("This query should always return a row")
    file['config_info_id'] = row['id']
    # Does the config file already exist?
    h = rhnSQL.prepare(self._query_lookup_config_file)
    h.execute(**file)
    row = h.fetchone_dict()
    if row:
        # Already present; the revision path updates `modified` later on.
        file['config_file_id'] = row['id']
        return
    # Insert through the API function so quotas stay up to date.
    insert_call = rhnSQL.Function("rhn_config.insert_file",
                                  rhnSQL.types.NUMBER())
    file['config_file_id'] = insert_call(file['config_channel_id'], file['path'])
def schedule_kickstart_delta(server_id, kickstart_session_id, installs, removes):
    """Schedule a packages.runTransaction action carrying the install/remove
    package delta for a kickstart session; returns the action id."""
    log_debug(3, server_id, kickstart_session_id)
    session = get_kickstart_session_info(kickstart_session_id, server_id)
    action_id = rhnAction.schedule_server_action(
        server_id,
        action_type='packages.runTransaction',
        action_name="Package delta",
        delta_time=0,
        scheduler=session['scheduler'],
        org_id=session['org_id'],
    )
    package_delta_id = rhnSQL.Sequence('rhn_packagedelta_id_seq').next()
    rhnSQL.prepare(_query_insert_package_delta).execute(
        package_delta_id=package_delta_id)
    rhnSQL.prepare(_query_insert_action_package_delta).execute(
        action_id=action_id, package_delta_id=package_delta_id)
    element = rhnSQL.prepare(_query_insert_package_delta_element)
    col_names = ['n', 'v', 'r', 'e']
    __execute_many(element, installs, col_names, operation='insert', a=None,
                   package_delta_id=package_delta_id)
    __execute_many(element, removes, col_names, operation='delete', a=None,
                   package_delta_id=package_delta_id)
    update_ks_session_table(kickstart_session_id, 'package_synch_scheduled',
                            action_id, server_id)
    return action_id
def cleanup_profile(server_id, action_id, ks_session_id, action_status):
    """After a completed sync action, delete the kickstart session's sync profile."""
    if ks_session_id is None:
        log_debug(4, "No kickstart session")
        return
    if action_status != 2:
        # Only a Completed (2) action triggers the cleanup.
        log_debug(4, "Action status: %s; nothing to do" % action_status)
        return
    lookup = rhnSQL.prepare(_query_lookup_ks_server_profile)
    lookup.execute(ks_session_id=ks_session_id, profile_type_label='sync_profile')
    row = lookup.fetchone_dict()
    if not row:
        log_debug(4, "No server profile of the right type found; nothing to do")
        return
    server_profile_id = row['server_profile_id']
    if server_profile_id is None:
        log_debug(4, "No server profile associated with this kickstart session")
        return
    # "on delete cascade" constraints on rhnKickstartSession.server_profile_id
    # and rhnServerProfilePackage.server_profile_id clean up the children.
    delete = rhnSQL.prepare(_query_delete_server_profile)
    delete.execute(server_profile_id=server_profile_id)
def create_first_private_chan_family():
    """ Check to see if org has a channelfamily associated with it.
        If not, create one.
    """
    _lookup_chfam = """
       SELECT 1 from rhnChannelFamily
        WHERE label='private-channel-family-1'
    """
    h = rhnSQL.prepare(_lookup_chfam)
    h.execute()
    # some extra check for upgrades; fetch a row instead of relying on the
    # return value of execute(), which is not a result row for a SELECT.
    if h.fetchone_dict():
        # Already exists, move on
        return
    _query_create_chfam = """
       INSERT INTO rhnChannelFamily
              (id, name, label, org_id, product_url)
       VALUES (sequence_nextval('rhn_channel_family_id_seq'), :name, :label, :org, :url)
    """
    h = rhnSQL.prepare(_query_create_chfam)
    try:
        h.execute(name='Private Channel Family 1',
                  label='private-channel-family-1',
                  org=1,
                  url='First Org Created')
    except rhnSQL.SQLError:
        # Python 3 compatible except (was: `except rhnSQL.SQLError, e`);
        # if we're here that means we're violating something
        raise
def mtime_upload(server_id, action_id, data=None):
    """Create rhnActionConfigFileName rows for the client's attempted paths,
    then delegate to upload() to record the actual file state.

    `data` is the client-sent dict; only its 'attempted_paths' key is read here.
    """
    # Avoid the shared mutable default argument (was `data={}`).
    data = data or {}
    # at this point in time, no rhnActionConfigFileName entries exist, because
    # we didn't know them at schedule time... go ahead and create them now, and
    # then just use the main upload to handle the updating of the state...
    paths = data.get('attempted_paths') or []
    if not paths:
        log_debug(6, "no matched files")
        return
    log_debug(6, 'attempted paths', paths)
    # if there are already rhnActionConfigFileName entries for this sid+aid,
    # it's most likely a rescheduled action, and we'll need to blow away the
    # old entries (they might not be valid any longer)
    h = rhnSQL.prepare(_query_any_action_config_filenames)
    h.execute(server_id=server_id, action_id=action_id)
    already_filenames = h.fetchone_dict() or []
    if already_filenames:
        h = rhnSQL.prepare(_query_clear_action_config_filenames)
        h.execute(server_id=server_id, action_id=action_id)
    num_paths = len(paths)
    h = rhnSQL.prepare(_query_create_action_config_filename)
    h.execute_bulk({
        'action_id': [action_id] * num_paths,
        'server_id': [server_id] * num_paths,
        'path': paths,
    })
    upload(server_id, action_id, data)
def run(server_id, action_id, data=None):
    """Store the output of a remote script run for (server_id, action_id)."""
    log_debug(3)
    # Avoid the shared mutable default argument (was `data={}`).
    data = data or {}
    # clear any previously received output
    h = rhnSQL.prepare(_query_clear_output)
    h.execute(server_id=server_id, action_id=action_id)
    if not data:
        log_debug(4, "No data sent by client")
        return
    output = data.get('output')
    # newer clients should always be setting this flag and encoding the
    # results, otherwise xmlrpc isn't very happy on certain characters
    if 'base64enc' in data:  # dict.has_key() does not exist in Python 3
        output = base64.decodestring(output)
    return_code = data.get('return_code')
    process_end = data.get('process_end')
    process_start = data.get('process_start')
    log_debug(4, "script output", output)
    h = rhnSQL.prepare(_query_initial_store, blob_map={'output': 'output'})
    h.execute(server_id=server_id,
              action_id=action_id,
              process_start=process_start,
              process_end=process_end,
              return_code=return_code,
              output=output)
def _push_file(self, config_channel_id, file):
    """Validate and store one pushed config file (and its revision).

    Returns {} for empty input; populates many keys on `file` as side
    effects. Raises ConfigFile* errors for invalid/oversized input.
    NOTE(review): the local `result` dict collects a 'file_too_large' flag;
    confirm against the full file whether it is returned further down.
    """
    if not file:
        # Nothing to do
        return {}
    # Check for full path on the file
    path = file.get('path')
    if not (path[0] == os.sep):
        raise ConfigFilePathIncomplete(file)
    if 'config_file_type_id' not in file:
        log_debug(4, "Client does not support config directories, so set file_type_id to 1")
        file['config_file_type_id'] = '1'
    # Check if delimiters are present
    if self._is_file(file) and \
            not (file.get('delim_start') and file.get('delim_end')):
        # Need delimiters
        raise ConfigFileMissingDelimError(file)
    if not (file.get('user') and file.get('group')
            and file.get('mode') is not None) and not self._is_link(file):
        raise ConfigFileMissingInfoError(file)
    # Oracle doesn't like certain binding variables
    file['username'] = file.get('user', '')
    file['groupname'] = file.get('group', '')
    file['file_mode'] = str(file.get('mode', ''))
    # if the selinux flag is not sent by the client it is set to the last file
    # revision (or to None (i.e. NULL) in case of first revision) - see the bug
    # 644985 - SELinux context cleared from RHEL4 rhncfg-client
    file['selinux_ctx'] = file.get('selinux_ctx', None)
    if not file['selinux_ctx']:
        # RHEL4 or RHEL5+ with disabled selinux - set from the last revision
        h = rhnSQL.prepare(self._query_current_selinux_lookup)
        h.execute(**file)
        row = h.fetchone_dict()
        if row:
            file['selinux_ctx'] = row['selinux_ctx']
        else:
            file['selinux_ctx'] = None
    result = {}
    try:
        if self._is_file(file):
            self._push_contents(file)
        elif self._is_link(file):
            file['symlink'] = file.get('symlink') or ''
    except ConfigFileTooLargeError:
        result['file_too_large'] = 1
    t = rhnSQL.Table('rhnConfigFileState', 'label')
    state_id_alive = t['alive']['id']
    file['state_id'] = state_id_alive
    file['config_channel_id'] = config_channel_id
    try:
        self._push_config_file(file)
        self._push_revision(file)
    except rhnSQL.SQLSchemaError:
        # Python 3 compatible except (was: `except rhnSQL.SQLSchemaError, e`),
        # matching the sys.exc_info() style used elsewhere in this file.
        e = sys.exc_info()[1]
        log_debug(4, "schema error", e)
        rhnSQL.rollback()  # blow away the contents that got inserted
        if e.errno == 20267:
            # ORA-20267: (not_enough_quota) - Insufficient available quota
            # for the specified action
            raise ConfigFileExceedsQuota(file)
        raise
def __init__(self, pkg_mapper, erratum_mapper, comps_mapper):
    """Cache the mappers and pre-prepare every per-channel lookup query."""
    self.pkg_mapper = pkg_mapper
    self.erratum_mapper = erratum_mapper
    self.comps_mapper = comps_mapper
    # Channel label/name plus the label of its checksum type.
    self.channel_details_sql = rhnSQL.prepare("""
        select c.label, c.name, ct.label checksum_type
        from rhnChannel c, rhnChecksumType ct
        where c.id = :channel_id
          and c.checksum_type_id = ct.id
    """)
    # All package ids attached to the channel.
    self.channel_sql = rhnSQL.prepare("""
        select package_id
        from rhnChannelPackage
        where channel_id = :channel_id
    """)
    # Channel last-modified timestamp as a YYYYMMDDHH24MISS string.
    self.last_modified_sql = rhnSQL.prepare("""
        select to_char(last_modified, 'YYYYMMDDHH24MISS') as last_modified
        from rhnChannel
        where id = :channel_id
    """)
    # Errata ids attached to the channel.
    self.errata_id_sql = rhnSQL.prepare("""
        select e.id
        from rhnChannelErrata ce, rhnErrata e
        where ce.channel_id = :channel_id
          and e.id = ce.errata_id
    """)
    # rhnChannelComps entries for the channel, newest id first.
    self.comps_id_sql = rhnSQL.prepare("""
        select id
        from rhnChannelComps
        where channel_id = :channel_id
        order by id desc
    """)
    # Original channel id when this channel is a clone (empty otherwise).
    self.cloned_from_id_sql = rhnSQL.prepare("""
        select original_id id
        from rhnChannelCloned
        where id = :channel_id
    """)
def __init__(self):
    """Pre-prepare every per-package lookup query used while dumping packages."""
    # Full package details: NEVRA, checksum, sizes, header offsets, group,
    # build info, path, source rpm and last-modified.
    self.details_sql = rhnSQL.prepare("""
        select
            pn.name,
            pevr.version,
            pevr.release,
            pevr.epoch,
            pa.label arch,
            c.checksum checksum,
            p.summary,
            p.description,
            p.vendor,
            p.build_time,
            p.package_size,
            p.payload_size,
            p.installed_size,
            p.header_start,
            p.header_end,
            pg.name package_group,
            p.build_host,
            p.copyright,
            p.path,
            sr.name source_rpm,
            p.last_modified,
            c.checksum_type
        from rhnPackage p, rhnPackageName pn, rhnPackageEVR pevr,
             rhnPackageArch pa, rhnPackageGroup pg, rhnSourceRPM sr,
             rhnChecksumView c
        where p.id = :package_id
          and p.name_id = pn.id
          and p.evr_id = pevr.id
          and p.package_arch_id = pa.id
          and p.package_group = pg.id
          and p.source_rpm_id = sr.id
          and p.checksum_id = c.id
    """)
    # File names owned by the package.
    self.filelist_sql = rhnSQL.prepare("""
        select pc.name
        from rhnPackageCapability pc, rhnPackageFile pf
        where pf.package_id = :package_id
          and pf.capability_id = pc.id
    """)
    # All dependency relations (provides/requires/recommends/supplements/
    # enhances/suggests/conflicts/obsoletes/breaks/predepends) tagged with
    # the relation name, sense flags and capability name/version.
    self.prco_sql = rhnSQL.prepare("""
        select 'provides', pp.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageProvides pp
        where pp.package_id = :package_id
          and pp.capability_id = pc.id
        union all
        select 'requires', pr.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageRequires pr
        where pr.package_id = :package_id
          and pr.capability_id = pc.id
        union all
        select 'recommends', prec.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageRecommends prec
        where prec.package_id = :package_id
          and prec.capability_id = pc.id
        union all
        select 'supplements', supp.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageSupplements supp
        where supp.package_id = :package_id
          and supp.capability_id = pc.id
        union all
        select 'enhances', enh.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageEnhances enh
        where enh.package_id = :package_id
          and enh.capability_id = pc.id
        union all
        select 'suggests', sugg.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageSuggests sugg
        where sugg.package_id = :package_id
          and sugg.capability_id = pc.id
        union all
        select 'conflicts', pcon.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageConflicts pcon
        where pcon.package_id = :package_id
          and pcon.capability_id = pc.id
        union all
        select 'obsoletes', po.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageObsoletes po
        where po.package_id = :package_id
          and po.capability_id = pc.id
        union all
        select 'breaks', brks.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackageBreaks brks
        where brks.package_id = :package_id
          and brks.capability_id = pc.id
        union all
        select 'predepends', pdep.sense, pc.name, pc.version
        from rhnPackageCapability pc, rhnPackagePredepends pdep
        where pdep.package_id = :package_id
          and pdep.capability_id = pc.id
    """)
    # Package last-modified timestamp as a YYYYMMDDHH24MISS string.
    self.last_modified_sql = rhnSQL.prepare("""
        select to_char(last_modified, 'YYYYMMDDHH24MISS') as last_modified
        from rhnPackage
        where id = :package_id
    """)
    # Changelog entries for the package.
    self.other_sql = rhnSQL.prepare("""
        select name, text, time
        from rhnPackageChangelog
        where package_id = :package_id
    """)
def __init__(self, writer, params):
    """Build an errata dumper whose rows come from iterator_query bound with `params`."""
    prepared = rhnSQL.prepare(self.iterator_query)
    exportLib.ErrataDumper.__init__(self, writer, QueryIterator(prepared, params))
def _update_config_file(self, file):
    """Refresh the rhnConfigFile row from the fields collected in `file`."""
    cursor = rhnSQL.prepare(self._query_update_config_file)
    cursor.execute(**file)
def _add_author(self, file, author):
    """Stamp the file's current config revision with the pushing user's id."""
    cursor = rhnSQL.prepare(self._query_update_revision_add_author)
    cursor.execute(user_id=author.getid(), rev_id=file['config_revision_id'])
def _update_revision(self, file):
    """Update the rhnConfigRevision row from the fields collected in `file`."""
    cursor = rhnSQL.prepare(self._query_update_revision)
    cursor.execute(**file)
def get_running_clients(self):
    """Return the running-clients count from the DB, 0 when no row exists."""
    log_debug(3)
    cursor = rhnSQL.prepare(self._query_get_running_clients)
    cursor.execute()
    counts = cursor.fetchone_dict() or {}
    return int(counts.get("clients", 0))
def submit(self, system_id, action_id, result, message="", data={}):
    """ Submit the results of a queue run.
    Maps old and new rhn_check behavior to new database status codes

    The new API uses 4 slightly different status codes than the old
    client does. This function will "hopefully" sensibly map them.

    Old methodology:
       -rhn_check retrieves an action from the top of the action queue.
       -It attempts to execute the desired action and returns either
           (a) 0             -- presumed successful.
           (b) rhnFault object -- presumed failed
           (c) some other non-fault object -- *assumed* successful.
       -Regardless of result code, action is marked as "executed"

    We try to make a smarter status selection (i.e. failed||completed).

    For reference:
    New DB status codes:         Old DB status codes:
          0: Queued                   0: queued
          1: Picked Up                1: picked up
          2: Completed                2: executed
          3: Failed                   3: completed

    NOTE: `data` defaults to a shared mutable {}; it is only read here,
    never mutated.
    """
    if type(action_id) is not IntType:
        # Convert it to int
        try:
            action_id = int(action_id)
        except ValueError:
            log_error("Invalid action_id", action_id)
            # Py2 three-expression raise: re-raise with the original traceback
            raise rhnFault(
                30,
                _("Invalid action value type %s (%s)") % (
                    action_id, type(action_id))), None, sys.exc_info()[2]
    # Authenticate the system certificate
    self.auth_system(system_id)
    log_debug(1, self.server_id, action_id, result)
    # check that the action is valid
    # We have a uniqueness constraint on (action_id, server_id)
    h = rhnSQL.prepare("""
        select at.label action_type, at.trigger_snapshot, at.name
        from rhnServerAction sa, rhnAction a, rhnActionType at
        where sa.server_id = :server_id
          and sa.action_id = :action_id
          and sa.status = 1
          and a.id = :action_id
          and a.action_type = at.id
    """)
    h.execute(server_id=self.server_id, action_id=action_id)
    row = h.fetchone_dict()
    if not row:
        # Either the action does not exist, belongs to another server,
        # or is not in "Picked Up" (status 1) state.
        log_error("Server %s does not own action %s" % (
            self.server_id, action_id))
        raise rhnFault(22, _("Action %s does not belong to server %s") % (
            action_id, self.server_id))
    action_type = row['action_type']
    trigger_snapshot = (row['trigger_snapshot'] == 'Y')

    # Build the result message, folding in client-reported extras.
    if data.has_key('missing_packages'):
        missing_packages = "Missing-Packages: %s" % str(
            data['missing_packages'])
        rmsg = "%s %s" % (message, missing_packages)
    elif data.has_key('koan'):
        rmsg = "%s: %s" % (message, data['koan'])
    else:
        rmsg = message

    rcode = result
    # Careful with this one, result can be a very complex thing
    # and this processing is required for compatibility with old
    # rhn_check clients
    if type(rcode) == type({}):
        if result.has_key("faultCode"):
            rcode = result["faultCode"]
        if result.has_key("faultString"):
            rmsg = result["faultString"] + str(data)
    if type(rcode) in [type({}), type(()), type([])] \
            or type(rcode) is not IntType:
        # Anything still non-integer is stringified into the message and
        # treated as a generic failure code.
        rmsg = u"%s [%s]" % (unicode(message), unicode(rcode))
        rcode = -1

    # map to db codes.
    status = self.status_for_action_type_code(action_type, rcode)

    if status == 3:
        # Failed action - invalidate children
        self._invalidate_child_actions(action_id)
    elif action_type == 'reboot.reboot':
        # reboot action should stay as pickup
        rhnSQL.commit()
        return 0
    elif status == 2 and trigger_snapshot and self.__should_snapshot():
        # if action status is 'Completed', snapshot if allowed and if needed
        self.server.take_snapshot(
            "Scheduled action completion: %s" % row['name'])

    self.__update_action(action_id, status, rcode, rmsg)

    # Store the status in a flag - easier than to complicate the action
    # plugin API by adding a status
    rhnFlags.set('action_id', action_id)
    rhnFlags.set('action_status', status)

    self.process_extra_data(self.server_id, action_id, data=data,
                            action_type=action_type)

    # commit, because nobody else will
    rhnSQL.commit()
    return 0
def create_update_suse_products(self, sysid, guid, secret, ostarget, products):
    """Create or refresh the suseServer row for `sysid` and reconcile its
    installed-product links; when anything changed, flag the server as
    requiring an NCC sync (and clear the error flag for a retry)."""
    log_debug(4, sysid, guid, ostarget, products)
    # search, if a suseServer with this guid exists which is not this server
    # this would indicate a re-registration and we need to remove the old rhnServer
    h = rhnSQL.prepare("""
        SELECT rhn_server_id as id
          FROM suseServer
         WHERE guid = :guid
           AND rhn_server_id != :sysid
    """)
    h.execute(sysid=sysid, guid=guid)
    d = h.fetchone_dict()
    if d:
        old_sysid = d['id']
        log_debug(1, "Found duplicate server:", old_sysid)
        delete_server = rhnSQL.Procedure("delete_server")
        try:
            if old_sysid != None:
                delete_server(old_sysid)
        except rhnSQL.SQLError:
            # best-effort: log and continue with the registration
            log_error("Error deleting server: %s" % old_sysid)
        # IF we delete rhnServer all reference are deleted too
        #
        # now switch suseServer to new id
        #h = rhnSQL.prepare("""
        #    UPDATE suseServer
        #    SET rhn_server_id = :sysid
        #    WHERE rhn_server_id = :oldsysid
        #""")
        #h.execute(sysid=sysid, oldsysid=old_sysid);

    # remove this guid from suseDelServer list
    h = rhnSQL.prepare("""
        DELETE FROM suseDelServer
         WHERE guid = :guid
    """)
    h.execute(guid=guid)
    #rhnSQL.commit()

    # search if suseServer with ID sysid exists
    h = rhnSQL.prepare("""
        SELECT s.rhn_server_id as id,
               s.guid,
               s.secret,
               sot.target as ostarget,
               s.ncc_sync_required
          FROM suseServer s
          LEFT JOIN suseOSTarget sot ON s.ostarget_id = sot.id
         WHERE rhn_server_id = :sysid
    """)
    h.execute(sysid=sysid)
    t = h.fetchone_dict()
    ncc_sync_required = False
    # if not; create new suseServer
    if not t:
        ncc_sync_required = True
        h = rhnSQL.prepare("""
            INSERT INTO suseServer
                   (rhn_server_id, guid, secret, ostarget_id)
            values (:sysid, :guid, :secret,
                    (select id from suseOSTarget where os = :ostarget))
        """)
        h.execute(sysid=sysid, guid=guid, secret=secret, ostarget=ostarget)
    else:
        # if yes, read values and compare them with the provided data
        # update if needed
        data = {
            'rhn_server_id': sysid,
            'guid': guid,
            'secret': secret,
            'ostarget': ostarget
        }
        if t['guid'] != guid or t['secret'] != secret or \
                t['ostarget'] != ostarget:
            ncc_sync_required = True
            h = rhnSQL.prepare("""
                UPDATE suseServer
                   SET guid = :guid,
                       secret = :secret,
                       ostarget_id = (select id from suseOSTarget
                                      where os = :ostarget)
                 WHERE rhn_server_id = :rhn_server_id
            """)
            # All binds come from the `data` dict; no positional args.
            h.execute(*(), **data)

    # check products
    h = rhnSQL.prepare("""
        SELECT suse_installed_product_id as id
          FROM suseServerInstalledProduct
         WHERE rhn_server_id = :sysid
    """)
    h.execute(sysid=sysid)
    existing_products = [x['id'] for x in h.fetchall_dict() or []]

    # Add links for newly reported products; anything left in
    # existing_products afterwards is no longer installed.
    for product in products:
        sipid = self.get_installed_product_id(product)
        if not sipid:
            continue
        if sipid in existing_products:
            existing_products.remove(sipid)
            continue
        h = rhnSQL.prepare("""
            INSERT INTO suseServerInstalledProduct
                   (rhn_server_id, suse_installed_product_id)
            VALUES(:sysid, :sipid)
        """)
        h.execute(sysid=sysid, sipid=sipid)
        ncc_sync_required = True

    for pid in existing_products:
        h = rhnSQL.prepare("""
            DELETE from suseServerInstalledProduct
             WHERE rhn_server_id = :sysid
               AND suse_installed_product_id = :pid
        """)
        h.execute(sysid=sysid, pid=pid)
        ncc_sync_required = True

    if ncc_sync_required:
        # If the data have changed, we set the
        # sync_required flag and reset the errors
        # flag to give the registration another try
        h = rhnSQL.prepare("""
            UPDATE suseServer
               SET ncc_sync_required = 'Y',
                   ncc_reg_error = 'N'
             WHERE rhn_server_id = :sysid
        """)
        h.execute(sysid=sysid)
log_debug(4, "Invalid Action", text) self.__update_action(action['id'], 3, -99, text) continue except EmptyAction, e: # this means that we have some sort of internal error # which gets reported in the logs. We don't touch the # action because this should get fixed on our side. log_error("Can not process action data", action, e.args) ret = "" break else: # all fine # Update the status of the action h = rhnSQL.prepare(""" update rhnServerAction set status = 1, pickup_time = current_timestamp, remaining_tries = :tries - 1 where action_id = :action_id and server_id = :server_id """) h.execute(action_id=action["id"], server_id=self.server_id, tries=action["remaining_tries"]) break # commit all changes rhnSQL.commit() return ret def submit(self, system_id, action_id, result, message="", data={}): """ Submit the results of a queue run.
def __packageUpdate(self, actionId):
    """ Old client package retrieval.

    Returns an xmlrpclib-encoded 'client.update_packages' call carrying
    the [name, version, release, ''] list of packages scheduled by the
    action (epoch is deliberately blanked — see loop comment).
    """
    log_debug(3, self.server_id, actionId)
    # The SQL query is a union of:
    # - packages with a specific EVR
    # - the latest packages (no EVR specified)
    # XXX Should we want to schedule the install for a specific version,
    # we'll have to modify this
    statement = """
        select distinct
               pkglist.name name,
               -- decode the evr object selected earlier
               pkglist.evr.version version,
               pkglist.evr.release release
        from (
            -- get the max of the two possible cases
            select pl.name name, max(pl.evr) evr
            from (
                -- if the EVR is specifically requested...
                select pn.name name, pe.evr evr
                from rhnActionPackage ap,
                     rhnPackage p,
                     rhnPackageName pn,
                     rhnPackageEVR pe,
                     rhnServerChannel sc,
                     rhnChannelPackage cp
                where ap.action_id = :action_id
                  and ap.evr_id is NOT NULL
                  and ap.evr_id = p.evr_id
                  and ap.evr_id = pe.id
                  and ap.name_id = p.name_id
                  and ap.name_id = pn.id
                  and p.id = cp.package_id
                  and cp.channel_id = sc.channel_id
                  and sc.server_id = :server_id
                UNION
                -- when no EVR requested, we need to compute the max available
                -- from the channels the server is subscribed to
                select pn.name name, max(pevr.evr) evr
                from rhnActionPackage ap,
                     rhnServerChannel sc,
                     rhnChannelPackage cp,
                     rhnPackage p,
                     rhnPackageEVR pevr,
                     rhnPackageName pn
                where ap.action_id = :action_id
                  and ap.evr_id is null
                  and ap.name_id = pn.id
                  and ap.name_id = p.name_id
                  and p.evr_id = pevr.id
                  and sc.server_id = :server_id
                  and sc.channel_id = cp.channel_id
                  and cp.package_id = p.id
                group by pn.name
            ) pl
            group by pl.name
        ) pkglist
    """
    h = rhnSQL.prepare(statement)
    h.execute(action_id=actionId, server_id=self.server_id)
    ret = h.fetchall_dict() or []
    packages = []
    for p in ret:
        # old clients have issues dealing with real epochs, so we
        # kind of fake it for now in here
        entry = [p['name'], p['version'], p['release'], '']
        packages.append(entry)
    xml = xmlrpclib.dumps((packages, ),
                          methodname='client.update_packages')
    return xml
def delete_channels(channelLabels, force=0, justdb=0, skip_packages=0,
                    skip_channels=0, skip_kickstart_trees=0,
                    just_kickstart_trees=0):
    """Delete the given channels and, depending on the flags, their
    packages, kickstart trees, on-disk files and repo data.

    justdb: only touch the database, leave files on disk.
    skip_*: leave the corresponding objects alone.
    just_kickstart_trees: restrict the whole run to kickstart trees.
    """
    # Get the package ids
    if not channelLabels:
        return
    rpms_ids = list_packages(channelLabels, force=force, sources=0)
    rpms_paths = _get_package_paths(rpms_ids, sources=0)
    srpms_ids = list_packages(channelLabels, force=force, sources=1)
    srpms_paths = _get_package_paths(srpms_ids, sources=1)

    if not skip_packages and not just_kickstart_trees:
        _delete_srpms(srpms_ids)
        _delete_rpms(rpms_ids)

    if not skip_kickstart_trees and not justdb:
        _delete_ks_files(channelLabels)

    if not justdb and not skip_packages and not just_kickstart_trees:
        _delete_files(rpms_paths + srpms_paths)

    # Get the channel ids
    h = rhnSQL.prepare("""
        select id, parent_channel
        from rhnChannel
        where label = :label
        order by parent_channel""")
    channel_ids = []
    for label in channelLabels:
        h.execute(label=label)
        row = h.fetchone_dict()
        if not row:
            # NOTE(review): an unknown label aborts the whole collection
            # loop (break, not continue) — confirm that is intentional.
            break
        channel_id = row['id']
        if row['parent_channel']:
            # Subchannel, we have to remove it first
            channel_ids.insert(0, channel_id)
        else:
            channel_ids.append(channel_id)
    if not channel_ids:
        return

    # Tables reachable only through an intermediate table: delete the
    # dependent rows first via an IN-subselect.
    indirect_tables = [
        ['rhnKickstartableTree', 'channel_id', 'rhnKSTreeFile', 'kstree_id'],
    ]
    query = """
        delete from %(table_2)s where %(link_field)s in (
            select id from %(table_1)s where %(channel_field)s = :channel_id
        )
    """
    for e in indirect_tables:
        args = {
            'table_1': e[0],
            'channel_field': e[1],
            'table_2': e[2],
            'link_field': e[3],
        }
        h = rhnSQL.prepare(query % args)
        h.executemany(channel_id=channel_ids)

    # Directly-referencing tables, in dependency order.
    tables = [
        ['rhnErrataFileChannel', 'channel_id'],
        ['rhnErrataNotificationQueue', 'channel_id'],
        ['rhnChannelErrata', 'channel_id'],
        ['rhnChannelPackage', 'channel_id'],
        ['rhnRegTokenChannels', 'channel_id'],
        ['rhnServerProfile', 'base_channel'],
        ['rhnKickstartableTree', 'channel_id'],
    ]
    if not skip_channels:
        tables.extend([
            ['suseProductChannel', 'channel_id'],
            ['rhnChannelFamilyMembers', 'channel_id'],
            ['rhnDistChannelMap', 'channel_id'],
            ['rhnReleaseChannelMap', 'channel_id'],
            ['rhnChannel', 'id'],
        ])

    if just_kickstart_trees:
        tables = [['rhnKickstartableTree', 'channel_id']]

    query = "delete from %s where %s = :channel_id"
    for table, field in tables:
        log_debug(3, "Processing table %s" % table)
        h = rhnSQL.prepare(query % (table, field))
        h.executemany(channel_id=channel_ids)

    if not justdb and not just_kickstart_trees:
        __deleteRepoData(channelLabels)
def get(self, system_id, version=1, status={}):
    """Pick up the next runnable queued action for the authenticated
    client and return it in client (V1/V2) format, or "" when nothing
    is available.

    NOTE: `status` defaults to a shared mutable {}; it is only read here.
    """
    # Authenticate the system certificate
    if CFG.DISABLE_CHECKINS:
        self.update_checkin = 0
    else:
        self.update_checkin = 1
    self.auth_system(system_id)
    log_debug(1, self.server_id, version,
              "checkins %s" % ["disabled", "enabled"][self.update_checkin])
    if status:
        self.__update_status(status)

    # Update the capabilities list
    rhnCapability.update_client_capabilities(self.server_id)

    # Invalidate failed actions
    self._invalidate_failed_prereq_actions()

    server_locked = self.server.server_locked()
    log_debug(3, "Server locked", server_locked)

    if self.__reboot_in_progress():
        log_debug(3, "Server reboot in progress", self.server_id)
        rhnSQL.commit()
        return ""

    ret = {}
    # get the action. Status codes are currently:
    # 0 Queued
    # 1 Picked Up
    # 2 Completed
    # 3 Failed
    # XXX: we should really be using labels from rhnActionType instead of
    # hard coded type id numbers.
    # We fetch actions whose prerequisites have completed, and actions
    # that don't have prerequisites at all
    h = rhnSQL.prepare(self._query_queue_get)

    should_execute = 1

    # Loop to get a valid action
    # (only one valid action will be dealt with per execution of this function...)
    while 1:
        if should_execute:
            h.execute(server_id=self.server_id)
            should_execute = 0

        # Okay, got an action
        action = h.fetchone_dict()
        if not action:
            # No actions available; bail out
            # Don't forget the commit at the end...
            ret = ""
            break
        action_id = action['id']
        log_debug(4, "Checking action %s" % action_id)
        # okay, now we have the action - process it.
        if action['remaining_tries'] < 1:
            log_debug(4, "Action %s picked up too many times" % action_id)
            # We've run out of pickup attempts for this action...
            self.__update_action(
                action_id,
                status=3,
                message="This action has been picked up multiple times "
                "without a successful transaction; "
                "this action is now failed for this system.")
            # Invalidate actions that depend on this one
            self._invalidate_child_actions(action_id)
            # keep looking for a good action to process...
            continue

        if server_locked and action['unlocked_only'] == 'Y':
            # This action is locked
            log_debug(
                4, "server id %s locked for action id %s" % (
                    self.server_id, action_id))
            continue
        try:
            # Render the action in the protocol version the client speaks.
            if version == 1:
                ret = self.__getV1(action)
            else:
                ret = self.__getV2(action)
        except ShadowAction, e:
            # Action the client should not see
            # Make sure we re-execute the query, so we pick up whatever
            # extra actions were added
            should_execute = 1
            text = e.args[0]
            log_debug(4, "Shadow Action", text)
            self.__update_action(action['id'], 2, 0, text)
            continue
        except InvalidAction, e:
            # This is an invalid action
            # Update its status so it won't bother us again
            text = e.args[0]
            log_debug(4, "Invalid Action", text)
            self.__update_action(action['id'], 3, -99, text)
            continue
        # NOTE(review): the remaining exception handlers, the pickup
        # bookkeeping and the final commit of this method continue beyond
        # this excerpt.
def update(serverId, actionId, dry_run=0):
    """Fetch the package rows scheduled by the update action and hand
    them to the generic action handler."""
    cursor = rhnSQL.prepare(_packageStatement_update)
    cursor.execute(serverid=serverId, actionid=actionId)
    pending_packages = cursor.fetchall_dict()
    return handle_action(serverId, actionId, pending_packages, dry_run)
def delete_rhnCryptoKey_null_org(description_prefix):
    """Delete null-org rhnCryptoKey rows whose description starts with
    the given prefix."""
    cursor = rhnSQL.prepare(_queryDeleteCryptoCertInfoNullOrg)
    cursor.execute(description_prefix=description_prefix)
def fetch_token(token_string):
    """ Fetches a token from the database.

    `token_string` may be a comma-separated list of activation keys; the
    tokens are merged (union of entitlements, consistent org, at most one
    kickstart session, at most one re-activation token) and returned as an
    ActivationTokens / ReRegistrationToken / ReRegistrationActivationToken
    object. Raises rhnFault for unknown tokens or inconsistent key sets.
    """
    log_debug(3, token_string)
    # A token should always be passed to this function
    assert token_string
    tokens = token_string.split(',')
    h = rhnSQL.prepare(_query_token)
    result = []
    rereg_token_found = 0
    num_of_rereg = 0
    # Global user_id and org_id
    user_id = None
    same_user_id = 1
    org_id = None
    ks_session_id_token = None
    deploy_configs = None
    entitlements_base = {}
    entitlements_extra = {}
    # List of re-registration entitlements labels (if found):
    rereg_ents = []
    for token in tokens:
        h.execute(token=token)
        token_entry, token_entitlements = _fetch_token_from_cursor(h)

        if not token_entry:
            # Unable to find the token
            log_error("Invalid token '%s'" % token)
            raise rhnFault(60, _("Could not find token '%s'") % token,
                           explain=0)

        row = token_entry

        # A token bound to a server_id is a re-activation token.
        if row.get('server_id'):
            rereg_token_found = row
            num_of_rereg += 1
            # Store the re-reg ents:
            for tup in token_entitlements.keys():
                rereg_ents.append(tup[0])

        # Check user_id
        token_user_id = row.get('user_id')

        # 4/27/05 wregglej - Commented this line out 'cause the token_user_id should
        # be allowed to be None. This line was causing problems when registering with
        # an activation key whose creator had been deleted.
        #assert(token_user_id is not None)

        if same_user_id and user_id is not None and \
                user_id != token_user_id:
            log_debug(4, "Different user ids: %s, %s" % (
                same_user_id, user_id))
            # This token has a different user id than the rest
            same_user_id = 0
        else:
            user_id = token_user_id

        # Check org_id
        token_org_id = row.get('org_id')
        assert (token_org_id is not None)
        if org_id is not None and org_id != token_org_id:
            # Cannot use activation keys from different orgs
            raise rhnFault(63, _("Tokens from mismatching orgs"), explain=0)
        org_id = token_org_id

        # Check kickstart session ids
        token_ks_session_id = row.get('kickstart_session_id')
        if token_ks_session_id is not None:
            if ks_session_id_token is not None:
                ks_session_id = ks_session_id_token['kickstart_session_id']
                if ks_session_id != token_ks_session_id:
                    # Two tokens with different kickstart sessions
                    raise rhnFault(63, _("Kickstart session mismatch"),
                                   explain=0)
            else:
                # This token has kickstart session id info
                ks_session_id_token = row

        # Iterate through the entitlements from this token
        # and intead of picking one entitlement, create a union of
        # all the entitlemts as a list of tuples of (name, label) aka
        # (token_type, token_desc)
        _categorize_token_entitlements(token_entitlements,
                                       entitlements_base,
                                       entitlements_extra)
        # Deploy configs?
        deploy_configs = deploy_configs or (row['deploy_configs'] == 'Y')
        result.append(row)

    # One should not stack re-activation tokens
    if num_of_rereg > 1:
        raise rhnFault(
            63, _("Stacking of re-registration tokens is not supported"),
            explain=0)

    entitlements_remove = []
    _validate_entitlements(token_string, rereg_ents, entitlements_base,
                           entitlements_extra, entitlements_remove)
    log_debug(5, "entitlements_base = %s" % entitlements_base)
    log_debug(5, "entitlements_extra = %s" % entitlements_extra)

    if ks_session_id_token:
        ks_session_id = ks_session_id_token['kickstart_session_id']
    else:
        ks_session_id = None

    # akl add entitles array constructed above to kwargs
    kwargs = {
        'user_id': user_id,
        'org_id': org_id,
        'kickstart_session_id': ks_session_id,
        'entitlements': entitlements_base.keys() + entitlements_extra.keys(),
        'deploy_configs': deploy_configs,
    }
    log_debug(4, "Values", kwargs)

    if rereg_token_found and len(result) > 1:
        log_debug(4, "re-activation stacked with activationkeys")
        kwargs['remove_entitlements'] = entitlements_remove
        return ReRegistrationActivationToken(result, **kwargs)
    elif rereg_token_found:
        log_debug(4, "simple re-activation")
        return ReRegistrationToken([rereg_token_found], **kwargs)

    return ActivationTokens(result, **kwargs)
def runTransaction(server_id, action_id, dry_run=0):
    """Build the package transaction scheduled for (server_id, action_id).

    Returns {'packages': [[[name, version, release, epoch, arch], op], ...]}
    where op is the single-letter code rpm/old clients understand
    ('i' install, 'e' erase, 'u' upgrade). Raises InvalidAction when no
    package delta is attached to the action.
    """
    log_debug(3, server_id, action_id, dry_run)
    # Fetch package_delta_id
    cursor = rhnSQL.prepare("""
        select package_delta_id
        from rhnActionPackageDelta
        where action_id = :action_id
    """)
    cursor.execute(action_id=action_id)
    delta_row = cursor.fetchone_dict()
    if delta_row is None:
        raise InvalidAction(
            "invalid packages.runTransaction action %s for server %s" %
            (action_id, server_id))
    package_delta_id = delta_row['package_delta_id']

    # Fetch packages
    cursor = rhnSQL.prepare("""
        select tro.label as operation, pn.name, pe.version, pe.release,
               pe.epoch, pa.label as package_arch
        from rhnPackageDeltaElement pde,
             rhnTransactionPackage rp
        left join rhnPackageArch pa
          on rp.package_arch_id = pa.id,
             rhnTransactionOperation tro,
             rhnPackageName pn,
             rhnPackageEVR pe
        where pde.package_delta_id = :package_delta_id
          and pde.transaction_package_id = rp.id
          and rp.operation = tro.id
          and rp.name_id = pn.id
          and rp.evr_id = pe.id
        order by tro.label, pn.name
    """)
    cursor.execute(package_delta_id=package_delta_id)

    # Map database operation labels to the codes the client/rpm
    # understands; any other label is silently skipped.
    op_codes = {'insert': 'i', 'delete': 'e', 'upgrade': 'u'}

    result = []
    while True:
        row = cursor.fetchone_dict()
        if not row:
            break
        op = op_codes.get(row['operation'])
        if op is None:
            # Unsupported operation
            continue
        # Fix null epochs; the arch can be null because of the outer join.
        epoch = row['epoch'] if row['epoch'] is not None else ''
        arch = row['package_arch'] or ""
        result.append([
            [row['name'], row['version'], row['release'], epoch, arch],
            op
        ])
    return {'packages': result}
def _get_token_config_channels(token_id):
    """Return the config channel rows attached to the token (possibly [])."""
    cursor = rhnSQL.prepare(_query_token_config_channels)
    cursor.execute(token_id=token_id)
    rows = cursor.fetchall_dict()
    if rows:
        return rows
    return []
def look_at_actions(server_id):
    """Fetch every action row recorded for the given server."""
    cursor = rhnSQL.prepare(_query_action_lookup)
    cursor.execute(server_id=server_id)
    return cursor.fetchall_dict()
def _get_client_config_channels(self, server_id):
    """List the config channels the client is subscribed to ([] if none)."""
    cursor = rhnSQL.prepare(self._query_client_config_channels)
    cursor.execute(server_id=server_id)
    rows = cursor.fetchall_dict()
    return rows if rows else []
def token_config_channels(server, tokens_obj):
    """Subscribe `server` to the config channels carried by the activation
    tokens (stacked in key order) and, when at least one token allows it,
    deploy the config files. Returns a list of status messages."""
    assert (isinstance(tokens_obj, ActivationTokens))
    server_id = server['id']

    # If this is a re-registration token, it should not have any config
    # channel associated with it (and no deploy_configs either). We'll just
    # keep whatever config files they had on this profile
    if tokens_obj.is_rereg_token:
        return []

    # Activation key order matters; config channels are stacked in order
    config_channels = []
    config_channels_hash = {}
    deployment = 0
    current_channels = []
    if tokens_obj.forget_rereg_token:
        current_channels = _get_current_config_channels(server_id)
    for token in tokens_obj.tokens:
        channels = _get_token_config_channels(token['token_id'])
        # Check every token used and if any of them are set to not deploy configs
        # then we won't deploy configs for any config channels the system is subscribed to
        deploy_configs = token['deploy_configs']
        log_debug(2, "token_id: ", token['token_id'],
                  " deploy_configs: ", deploy_configs)
        if deploy_configs == 'Y':
            log_debug(2, "At least one token set to deploy config files")
            deployment = 1
        for c in channels:
            config_channel_id = c['config_channel_id']
            # Skip channels the server already has, and duplicates across
            # tokens; new channels are appended after the existing ones.
            if not c['config_channel_id'] in current_channels and\
               not config_channels_hash.has_key(config_channel_id):
                position = len(current_channels) + len(config_channels) + 1
                # Update the position in the queue
                c['position'] = position
                config_channels.append(c)
                config_channels_hash[config_channel_id] = None

    ret = []
    if config_channels:
        h = rhnSQL.prepare(_query_set_server_config_channels)
        # One bulk execution with parallel bind lists, one entry per channel.
        h.execute_bulk({
            'server_id': [server_id] * len(config_channels),
            'config_channel_id': map(lambda c: c['config_channel_id'],
                                     config_channels),
            'position': map(lambda c: c['position'], config_channels),
        })

        for channel in config_channels:
            msg = "Subscribed to config channel %s" % channel['name']
            log_debug(4, msg)
            ret.append(msg)

    # Now that we have the server subscribed to config channels,
    # determine if we have to deploy the files too
    # Don't pass tokens_obj, we only need the token that provided the config
    # channels in the first place
    if deployment:
        log_debug(
            2, "At least one token has deploy_configs == Y, deploying configs")
        deploy_configs_if_needed(server)

    rhnSQL.commit()

    return ret
SELECT rvis.id FROM rhnVirtualInstanceState rvis WHERE rvis.label = :state) """) bindings['state'] = properties[PropertyType.STATE] # Only touch the database if something changed. if new_values_array: new_values = string.join(new_values_array, ', ') bindings['row_id'] = existing_row['instance_id'] update_sql = """ UPDATE rhnVirtualInstanceInfo SET %s WHERE instance_id=:row_id """ % (new_values) query = rhnSQL.prepare(update_sql) query.execute(**bindings) def __unconfirm_domains(self, system_id): update_sql = """ UPDATE rhnVirtualInstance SET confirmed=0 WHERE host_system_id=:sysid """ query = rhnSQL.prepare(update_sql) query.execute(sysid=system_id) def __confirm_domains(self, system_id): update_sql = """ UPDATE rhnVirtualInstance SET confirmed=1
def token_channels(server, server_arch, tokens_obj):
    """ Handle channel subscriptions for the registration token.

    Collects the channels attached to all tokens (filtered for arch
    compatibility), enforces a single base channel, reconciles it with the
    server's current base channel, then subscribes the compatible child
    channels. Builds a list of status messages in `ret`.
    """
    assert (isinstance(tokens_obj, ActivationTokens))
    server_id, server_arch_id = server['id'], server['server_arch_id']

    # what channels are associated with this token (filter only those
    # compatible with this server)
    h = rhnSQL.prepare("""
        select rtc.channel_id id, c.name, c.label, c.parent_channel
        from rhnRegTokenChannels rtc,
             rhnChannel c,
             rhnServerChannelArchCompat scac
        where rtc.token_id = :token_id
          and rtc.channel_id = c.id
          and c.channel_arch_id = scac.channel_arch_id
          and scac.server_arch_id = :server_arch_id
    """)

    chash = {}
    base_channel_token = None
    base_channel_id = None

    for token in tokens_obj.tokens:
        token_id = token['token_id']
        h.execute(token_id=token_id, server_arch_id=server_arch_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            channel_id = row['id']
            chash[channel_id] = row
            if row['parent_channel'] is not None:
                # Not a base channel
                continue

            # We only allow for one base channel
            if base_channel_id is not None and channel_id != base_channel_id:
                # Base channels conflict - are they coming from the same
                # token?
                if base_channel_token == token:
                    log_error("Token has multiple base channels", token_id,
                              base_channel_id)
                    raise rhnFault(
                        62,
                        _("Token `%s' has more than one base channel assigned")
                        % token['note'])
                raise rhnFault(63, _("Conflicting base channels"))
            base_channel_id = channel_id
            base_channel_token = token

    bc = chash.get(base_channel_id)
    log_debug(4, "base channel", bc)

    # get the base channel for this server
    # Note that we are hitting this codepath after newserver.__save() has been
    # run, which means we've already chosen a base channel
    # from rhnDistChannelMap
    sbc = rhnChannel.get_base_channel(server_id, none_ok=1)

    # prepare the return value
    ret = []

    # now try to figure out which base channel we prefer
    if bc is None:
        if sbc is None:
            # we need at least one base channel definition
            log_error("Server has invalid release and "
                      "token contains no base channels", server_id,
                      tokens_obj.tokens)
            ret.append("System registered without a base channel")
            ret.append("Unsupported release-architecture combination "
                       "(%s, %s)" % (server["release"], server_arch))
            return ret
    else:
        # do we need to drop the one from sbc?
        if sbc and sbc["id"] != bc["id"]:
            # we need to prefer the token one
            # unsubscribe from old channel(s)
            rhnChannel.unsubscribe_all_channels(server_id)
            sbc = None  # force true on the next test

        if sbc is None:
            # no base channel subscription at this point
            try:
                rhnChannel._subscribe_sql(server_id, bc["id"], commit=0)
            except rhnChannel.SubscriptionCountExceeded:
                ret.append("System registered without a base channel: "
                           "subscription count exceeded for channel %s (%s)" %
                           (bc["name"], bc["label"]))
                return ret
            ret.append("Subscribed to base channel '%s' (%s)" % (
                bc["name"], bc["label"]))
            sbc = bc

    # attempt to subscribe all non-base channels associated with this
    # token
    subscribe_channel = rhnSQL.Procedure("rhn_channel.subscribe_server")
    # Use a set here to ensure uniqueness of the
    # channel family ids used in the loop below.
    channel_family_ids = set()
    for c in filter(lambda a: a["parent_channel"], chash.values()):
        # make sure this channel has the right parent
        if str(c["parent_channel"]) != str(sbc["id"]):
            ret.append("NOT subscribed to channel '%s' "
                       "(not a child of '%s')" % (
                           c["name"], sbc["name"]))
            continue
        try:
            # don't run the EC yet
            # XXX: test return code when this one will start returning
            # a status
            subscribe_channel(server_id, c["id"], 0, None, 0)
            child = rhnChannel.Channel()
            child.load_by_id(c["id"])
            child._load_channel_families()
            cfamid = child._channel_families[0]
            channel_family_ids.add(cfamid)
        except rhnSQL.SQLError, e:
            log_error("Failed channel subscription", server_id,
                      c["id"], c["label"], c["name"])
            ret.append("FAILED to subscribe to channel '%s'" % c["name"])
        else:
            ret.append("Subscribed to channel '%s'" % c["name"])
    # NOTE(review): the remainder of this function (including the final
    # `return ret`) continues beyond this excerpt.
def import_kickstart(self, plug, url, repo_label):
    """Detect a kickstartable tree at `url` (via images/pxeboot/),
    register it as an externally-managed rhnKickstartableTree and mirror
    its files under CFG.MOUNT_POINT, recording each file in rhnKSTreeFile."""
    pxeboot_path = 'images/pxeboot/'
    pxeboot = plug.get_file(pxeboot_path)
    if pxeboot is None:
        # No pxeboot directory => not a kickstartable tree; bail out.
        if not re.search(r'/$', url):
            url = url + '/'
        self.error_msg("ERROR: kickstartable tree not detected (no %s%s)"
                       % (url, pxeboot_path))
        return

    # Already imported for this org/channel/label? Nothing to do.
    if rhnSQL.fetchone_dict("""
            select id
            from rhnKickstartableTree
            where org_id = :org_id and channel_id = :channel_id
              and label = :label
            """,
            org_id=self.channel['org_id'],
            channel_id=self.channel['id'],
            label=repo_label):
        print "Kickstartable tree %s already synced." % repo_label
        return

    row = rhnSQL.fetchone_dict("""
        select sequence_nextval('rhn_kstree_id_seq') as id from dual
    """)
    ks_id = row['id']

    ks_path = 'rhn/kickstart/%s/%s' % (self.channel['org_id'], repo_label)

    row = rhnSQL.execute("""
        insert into rhnKickstartableTree
               (id, org_id, label, base_path, channel_id,
                kstree_type, install_type,
                last_modified, created, modified)
        values (:id, :org_id, :label, :base_path, :channel_id,
                (select id from rhnKSTreeType
                 where label = 'externally-managed'),
                (select id from rhnKSInstallType
                 where label = 'generic_rpm'),
                current_timestamp, current_timestamp, current_timestamp)
    """, id=ks_id, org_id=self.channel['org_id'], label=repo_label,
         base_path=os.path.join(CFG.MOUNT_POINT, ks_path),
         channel_id=self.channel['id'])

    insert_h = rhnSQL.prepare("""
        insert into rhnKSTreeFile
               (kstree_id, relative_filename, checksum_id, file_size,
                last_modified, created, modified)
        values (:id, :path, lookup_checksum('sha256', :checksum), :st_size,
                epoch_seconds_to_timestamp_tz(:st_time),
                current_timestamp, current_timestamp)
    """)

    # Breadth-first walk of the remote directory index pages, starting
    # at the tree root; subdirectories get queued as they are discovered.
    dirs = ['']
    while len(dirs) > 0:
        d = dirs.pop(0)
        v = None
        if d == pxeboot_path:
            # Reuse the listing we already fetched for detection.
            v = pxeboot
        else:
            v = plug.get_file(d)
        if v is None:
            continue

        # Scrape hrefs out of the HTML index page.
        for s in (m.group(1)
                  for m in re.finditer(r'(?i)<a href="(.+?)"', v)):
            # Skip absolute links, query strings, parent refs, other
            # protocols and RPMs (packages are synced separately).
            if (re.match(r'/', s) or re.search(r'\?', s)
                    or re.search(r'\.\.', s)
                    or re.match(r'[a-zA-Z]+:', s)
                    or re.search(r'\.rpm$', s)):
                continue
            if re.search(r'/$', s):
                dirs.append(d + s)
                continue
            local_path = os.path.join(CFG.MOUNT_POINT, ks_path, d, s)
            if os.path.exists(local_path):
                print "File %s%s already present locally" % (d, s)
            else:
                print "Retrieving %s" % d + s
                plug.get_file(d + s, os.path.join(CFG.MOUNT_POINT, ks_path))
            st = os.stat(local_path)
            insert_h.execute(id=ks_id, path=d + s,
                             checksum=getFileChecksum('sha256', local_path),
                             st_size=st.st_size, st_time=st.st_mtime)

    rhnSQL.commit()
def __db_insert_system(self, identity, system_id, uuid, virt_type):
    """Insert a new rhnVirtualInstance (+ info) row for this system.

    identity    IdentityType.HOST or IdentityType.GUEST; anything else raises.
    system_id   the system's sysid; stored as host_system_id for a host,
                virtual_system_id for a guest.
    uuid        virtual instance uuid, also used to look up a confirmed host.
    virt_type   label resolved against rhnVirtualInstanceType for the info row.

    Raises VirtualizationEventError for an unknown identity or when no new
    instance id can be obtained from the sequence.
    """
    # A host's sysid goes into the host_system_id column; a guest's sysid
    # goes into the virtual_system_id column.
    host_id = None
    guest_id = None

    if identity == IdentityType.HOST:
        host_id = system_id
    elif identity == IdentityType.GUEST:
        guest_id = system_id

        # If this uuid is already registered and confirmed against a host,
        # reuse that host for the new row. Otherwise the guest simply ends
        # up with no host and consumes physical entitlements.
        check_sql = """
            select
                vi.id, vi.host_system_id, vi.confirmed
            from
                rhnVirtualInstance vi
            where
                vi.uuid = :uuid and confirmed = 1
        """
        query = rhnSQL.prepare(check_sql)
        # NOTE(review): check_sql binds only :uuid; the extra system_id
        # keyword looks superfluous — confirm rhnSQL tolerates unused binds
        # before removing it.
        query.execute(uuid=uuid, system_id=system_id)
        row = query.fetchone_dict()
        if row:
            host_id = row['host_system_id']
    else:
        raise VirtualizationEventError("Unknown identity:", identity)

    # Allocate the new virtual-instance id from its sequence.
    get_id_sql = "SELECT sequence_nextval('rhn_vi_id_seq') as id FROM dual"
    query = rhnSQL.prepare(get_id_sql)
    query.execute()
    row = query.fetchone_dict()
    # 'in' replaces the Python-2-only dict.has_key(); a None/empty row is
    # caught by the same test, so the former "or {}" default is unnecessary.
    if not row or 'id' not in row:
        raise VirtualizationEventError('unable to get virt instance id')

    insert_sql = """
        INSERT INTO rhnVirtualInstance
            (id, host_system_id, virtual_system_id, uuid, confirmed)
        VALUES
            (:id, :host_id, :guest_id, :uuid, 1)
    """
    query = rhnSQL.prepare(insert_sql)
    query.execute(id=row['id'],
                  host_id=host_id,
                  guest_id=guest_id,
                  uuid=uuid)

    # Initialize a dummy info record for this system; state and type ids
    # are resolved from their labels in the respective lookup tables.
    insert_sql = """
        INSERT INTO rhnVirtualInstanceInfo
            (instance_id, state, instance_type)
        VALUES
            (:id,
             (SELECT rvis.id
              FROM rhnVirtualInstanceState rvis
              WHERE rvis.label = :state),
             (SELECT rvit.id
              FROM rhnVirtualInstanceType rvit
              WHERE rvit.label = :virt_type))
    """
    query = rhnSQL.prepare(insert_sql)
    query.execute(id=row['id'],
                  state=ServerStateType.UNKNOWN,
                  virt_type=virt_type)
def upload_updates(self, notices):
    """Import updateinfo *notices* as errata attached to this channel.

    Builds one Erratum() per notice — merging the channel and package lists
    of any existing erratum with the same update_id — resolves each listed
    package against packages already synced into the channel, and hands the
    whole batch to ErrataImport. Notices whose packages are all absent from
    the channel are skipped (and counted).
    """
    batch = []
    skipped_updates = 0
    # Map updateinfo notice types onto the advisory types the DB knows.
    typemap = {
        'security': 'Security Advisory',
        'recommended': 'Bug Fix Advisory',
        'bugfix': 'Bug Fix Advisory',
        'optional': 'Product Enhancement Advisory',
        'feature': 'Product Enhancement Advisory',
        'enhancement': 'Product Enhancement Advisory'
    }
    for notice in notices:
        notice = self.fix_notice(notice)
        # Existing erratum with the same id (if any) — its channels and
        # packages are merged into the one we build below.
        existing_errata = self.get_errata(notice['update_id'])
        e = Erratum()
        e['errata_from'] = notice['from']
        e['advisory'] = notice['update_id']
        e['advisory_name'] = notice['update_id']
        e['advisory_rel'] = notice['version']
        # Unknown types default to Product Enhancement Advisory.
        e['advisory_type'] = typemap.get(notice['type'],
                                         'Product Enhancement Advisory')
        e['product'] = notice['release'] or 'Unknown'
        e['description'] = notice['description']
        e['synopsis'] = notice['title'] or notice['update_id']
        # Prefix security advisories with their severity, unless the title
        # already carries it.
        if (notice['type'] == 'security' and notice['severity'] and
                not e['synopsis'].startswith(notice['severity'] + ': ')):
            e['synopsis'] = notice['severity'] + ': ' + e['synopsis']
        # Placeholder values for fields updateinfo does not provide.
        e['topic'] = ' '
        e['solution'] = ' '
        e['issue_date'] = self._to_db_date(notice['issued'])
        # Fall back to the issue date when no update date is given.
        if notice['updated']:
            e['update_date'] = self._to_db_date(notice['updated'])
        else:
            e['update_date'] = self._to_db_date(notice['issued'])
        e['org_id'] = self.channel['org_id']
        e['notes'] = ''
        e['refers_to'] = ''
        e['channels'] = []
        e['packages'] = []
        e['files'] = []
        if existing_errata:
            e['channels'] = existing_errata['channels']
            e['packages'] = existing_errata['packages']
        e['channels'].append({'label': self.channel_label})

        # Resolve every package the notice lists against packages already
        # present in this channel; unresolvable ones are dropped.
        for pkg in notice['pkglist'][0]['packages']:
            param_dict = {
                'name': pkg['name'],
                'version': pkg['version'],
                'release': pkg['release'],
                'arch': pkg['arch'],
                'channel_id': int(self.channel['id']),
            }
            # Epoch matching: '0' also matches a NULL epoch in the DB;
            # missing/empty epoch matches NULL only; anything else is bound
            # as a literal value.
            if pkg['epoch'] == '0':
                epochStatement = "(pevr.epoch is NULL or pevr.epoch = '0')"
            elif pkg['epoch'] is None or pkg['epoch'] == '':
                epochStatement = "pevr.epoch is NULL"
            else:
                epochStatement = "pevr.epoch = :epoch"
                param_dict['epoch'] = pkg['epoch']
            # Custom (org) channels match the org id; null-org channels
            # match packages with no org.
            if self.channel['org_id']:
                param_dict['org_id'] = self.channel['org_id']
                orgStatement = "= :org_id"
            else:
                orgStatement = "is NULL"

            # The org and epoch clauses are interpolated (not bound) because
            # their SQL shape varies; all values are bound parameters.
            h = rhnSQL.prepare("""
                select p.id, pevr.epoch, c.checksum, c.checksum_type
                  from rhnPackage p
                  join rhnPackagename pn on p.name_id = pn.id
                  join rhnpackageevr pevr on p.evr_id = pevr.id
                  join rhnpackagearch pa on p.package_arch_id = pa.id
                  join rhnArchType at on pa.arch_type_id = at.id
                  join rhnChecksumView c on p.checksum_id = c.id
                  join rhnChannelPackage cp on p.id = cp.package_id
                 where pn.name = :name
                   and p.org_id %s
                   and pevr.version = :version
                   and pevr.release = :release
                   and pa.label = :arch
                   and %s
                   and at.label = 'rpm'
                   and cp.channel_id = :channel_id
            """ % (orgStatement, epochStatement))
            h.execute(**param_dict)
            cs = h.fetchone_dict() or None
            if not cs:
                # Package not in the channel — log and drop it from the
                # erratum.
                if param_dict.has_key('epoch'):
                    epoch = param_dict['epoch'] + ":"
                else:
                    epoch = ""
                log_debug(1, "No checksum found for %s-%s%s-%s.%s."
                             " Skipping Package" % (param_dict['name'],
                                                    epoch,
                                                    param_dict['version'],
                                                    param_dict['release'],
                                                    param_dict['arch']))
                continue

            # Rebuild the package list without any older entry for the same
            # package id, then append the freshly resolved one.
            newpkgs = []
            for oldpkg in e['packages']:
                if oldpkg['package_id'] != cs['id']:
                    newpkgs.append(oldpkg)

            package = IncompletePackage().populate(pkg)
            package['epoch'] = cs['epoch']
            package['org_id'] = self.channel['org_id']
            package['checksums'] = {cs['checksum_type']: cs['checksum']}
            package['checksum_type'] = cs['checksum_type']
            package['checksum'] = cs['checksum']
            package['package_id'] = cs['id']
            newpkgs.append(package)
            e['packages'] = newpkgs

        # An erratum with no resolvable packages is not imported.
        if len(e['packages']) == 0:
            skipped_updates = skipped_updates + 1
            continue

        # Keywords carried over from the notice's suggestion flags.
        e['keywords'] = []
        if notice['reboot_suggested']:
            kw = Keyword()
            kw.populate({'keyword': 'reboot_suggested'})
            e['keywords'].append(kw)
        if notice['restart_suggested']:
            kw = Keyword()
            kw.populate({'keyword': 'restart_suggested'})
            e['keywords'].append(kw)
        # Split references into bugzilla entries and CVE ids, de-duplicating
        # each by id (tmp acts as a seen-set).
        e['bugs'] = []
        e['cve'] = []
        if notice['references']:
            bzs = [r for r in notice['references'] if r['type'] == 'bugzilla']
            if len(bzs):
                tmp = {}
                for bz in bzs:
                    if bz['id'] not in tmp:
                        bug = Bug()
                        bug.populate({'bug_id': bz['id'],
                                      'summary': bz['title'],
                                      'href': bz['href']})
                        e['bugs'].append(bug)
                        tmp[bz['id']] = None
            cves = [r for r in notice['references'] if r['type'] == 'cve']
            if len(cves):
                tmp = {}
                for cve in cves:
                    if cve['id'] not in tmp:
                        e['cve'].append(cve['id'])
                        tmp[cve['id']] = None
        e['locally_modified'] = None
        batch.append(e)

    if skipped_updates > 0:
        self.print_msg("%d errata skipped because of empty package list." %
                       skipped_updates)
    # Hand the whole batch to the importer and mark the channel for
    # repodata regeneration.
    backend = SQLBackend()
    importer = ErrataImport(batch, backend)
    importer.run()
    self.regen = True
#!/usr/bin/env python
"""Block until at least one mgr-sync refresh task has completed a run."""

import time

from spacewalk.server import rhnSQL

rhnSQL.initDB()

# Counts the distinct mgr-sync-refresh tasks that have at least one
# finished run (end_time set).
refresh_done = rhnSQL.prepare("""
    SELECT COUNT(DISTINCT task.name) AS count
    FROM rhnTaskoRun run
    JOIN rhnTaskoTemplate template ON template.id = run.template_id
    JOIN rhnTaskoBunch bunch ON bunch.id = template.bunch_id
    JOIN rhnTaskoTask task ON task.id = template.task_id
    WHERE bunch.name = 'mgr-sync-refresh-bunch'
    AND run.end_time IS NOT NULL
""")

print("Waiting for mgr-sync refresh to finish...")
finished = False
while not finished:
    refresh_done.execute()
    finished = refresh_done.fetchone_dict()['count'] > 0
    if not finished:
        print("...not finished yet...")
        time.sleep(10)
print("Done.")
def update_date(self):
    """Stamp this channel's LAST_SYNCED column with the current time."""
    stmt = rhnSQL.prepare(
        """update rhnChannel set LAST_SYNCED = current_timestamp where label = :channel""")
    stmt.execute(channel=self.channel['label'])
def import_packages(self, plug, source_id, url): if (not self.filters) and source_id: h = rhnSQL.prepare(""" select flag, filter from rhnContentSourceFilter where source_id = :source_id order by sort_order """) h.execute(source_id=source_id) filter_data = h.fetchall_dict() or [] filters = [(row['flag'], re.split(r'[,\s]+', row['filter'])) for row in filter_data] else: filters = self.filters packages = plug.list_packages(filters) to_process = [] num_passed = len(packages) self.print_msg("Packages in repo: %5d" % plug.num_packages) if plug.num_excluded: self.print_msg("Packages passed filter rules: %5d" % num_passed) channel_id = int(self.channel['id']) if self.channel['org_id']: self.channel['org_id'] = int(self.channel['org_id']) else: self.channel['org_id'] = None for pack in packages: db_pack = rhnPackage.get_info_for_package( [pack.name, pack.version, pack.release, pack.epoch, pack.arch], channel_id, self.channel['org_id']) to_download = True to_link = True if db_pack['path']: pack.path = os.path.join(CFG.MOUNT_POINT, db_pack['path']) if self.match_package_checksum(pack.path, pack.checksum_type, pack.checksum): # package is already on disk to_download = False if db_pack['channel_id'] == channel_id: # package is already in the channel to_link = False elif db_pack['channel_id'] == channel_id: # different package with SAME NVREA self.disassociate_package(db_pack) if to_download or to_link: to_process.append((pack, to_download, to_link)) num_to_process = len(to_process) if num_to_process == 0: self.print_msg("No new packages to sync.") return else: self.print_msg("Packages already synced: %5d" % (num_passed - num_to_process)) self.print_msg("Packages to sync: %5d" % num_to_process) self.regen = True is_non_local_repo = (url.find("file://") < 0) def finally_remove(path): if is_non_local_repo and path and os.path.exists(path): os.remove(path) # try/except/finally doesn't work in python 2.4 (RHEL5), so here's a hack for (index, what) in enumerate(to_process): pack, 
to_download, to_link = what localpath = None # pylint: disable=W0703 try: self.print_msg("%d/%d : %s" % (index + 1, num_to_process, pack.getNVREA())) if to_download: pack.path = localpath = plug.get_package(pack) pack.load_checksum_from_header() if to_download: pack.upload_package(self.channel) finally_remove(localpath) except KeyboardInterrupt: finally_remove(localpath) raise except Exception, e: self.error_msg(e) finally_remove(localpath) if self.fail: raise to_process[index] = (pack, False, False) continue