Example #1
def populate_rhn_dist_channel_map(channel_id, channel_arch_id, org_id, release):
    if not release:
        release = 'unit test'

    lookup = """
        SELECT 1 FROM rhnDistChannelMap
            WHERE release = :release AND
                channel_arch_id  = :channel_arch_id AND
                org_id = :org_id
    """

    h = rhnSQL.prepare(lookup)
    h.execute(
        release         = release,
        channel_arch_id = channel_arch_id,
        org_id          = org_id
    )
    if h.fetchone_dict():
        return

    query_create = """
       INSERT INTO rhnDistChannelMap
              (os, release, channel_arch_id, channel_id, org_id)
       VALUES (:os, :release, :channel_arch_id, :channel_id, :org_id)
    """

    h = rhnSQL.prepare(query_create)
    h.execute(
        os              = "TestOS",
        release         = release,
        channel_arch_id = channel_arch_id,
        channel_id      = channel_id,
        org_id          = org_id
    )
    rhnSQL.commit()
Example #2
    def test_new_server_token_2(self):
        "Test registration with token that specifies a base channel"
        u = self._create_new_user()
        org_id = u.contact['org_id']
        base_channel = 'rhel-i386-as-3'
        entitlements = self._entitlements
        t = misc_functions.create_activation_key(org_id=u.contact['org_id'],
            entitlement_level=entitlements, user_id=u.getid(),
            channels=[base_channel])

        token = t.get_token()
        
        params = build_new_system_params_with_token(token=token,
            os_release="2.1AS")

        system_id = register_new_system(params)
        rhnSQL.commit()

        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)

        server_id = s.getid()
        channels = rhnChannel.channels_for_server(server_id)
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['label'], base_channel)
Example #3
def store_rhnCryptoKey(description, cert, org_id, verbosity=0):
    """ stores cert in rhnCryptoKey
        uses:
            _checkCertMatch_rhnCryptoKey
            _delete_rhnCryptoKey - not currently used
            _insertPrep_rhnCryptoKey
            _lobUpdate_rhnCryptoKey
    """
    try:
        # look for a cert match in the database
        rhn_cryptokey_id = _checkCertMatch_rhnCryptoKey(cert, description,
                                                        org_id, deleteRowYN=1,
                                                        verbosity=verbosity)
        if rhn_cryptokey_id is None:
            # nothing to do - cert matches
            return
        # insert into the database
        if rhn_cryptokey_id == -1:
            rhn_cryptokey_id = _insertPrep_rhnCryptoKey(rhn_cryptokey_id,
                                                        description, org_id)
        # write/update
        _lobUpdate_rhnCryptoKey(rhn_cryptokey_id, cert)
        rhnSQL.commit()
    except rhnSQL.sql_base.SQLError:
        raise_with_tb(CaCertInsertionError(
            "...the traceback: %s" % fetchTraceback()), sys.exc_info()[2])
Example #4
def main():
    rhnSQL.initDB()

    blob_values1 = [
        # Regular update
        [1, 1,     'value 11', 'value 12', 1],
        [2, 1,     'value 21', 'value 22', 2],
        # Update with one of the primary keys being None
        [3, None,  'value 31', 'value 32', 3],
        [4, None,  'value 41', 'value 42', 4],
        # Test for writing an empty string into the blob
        [5, 5,     '',         'value 52', 5],
        # Test for writing a shorter string into the blob
        [6, 6,     'value 61', 'value 62', 6],
    ]
    newval1_1 = 'new value 11'
    newval1_2 = 'new value 12'
    newval3_1 = 'new value 31 ' * 1024
    newval3_2 = 'new value 32' * 2048
    newval5_1 = 'new value 51'
    newval5_2 = ''
    newval6_1 = 'v61'
    newval6_2 = 'v61'
    blob_values2 = blob_values1[:]
    for r in [0, 2, 4, 5]:
        # Copy the old values
        blob_values2[r] = blob_values1[r][:]
    blob_values2[0][2:5] = [newval1_1, newval1_2, 11]
    blob_values2[2][2:5] = [newval3_1, newval3_2, 2]
    blob_values2[4][2:5] = [newval5_1, newval5_2, 33]
    blob_values2[5][2:5] = [newval6_1, newval6_2, 4]

    test_blob_update = Table("test_blob_update",
                             fields={
                                 'id1': DBint(),
                                 'id2': DBint(),
                                 'val1': DBblob(),
                                 'val2': DBblob(),
                                 'nval': DBint(),
                             },
                             # Setting the nullable column to be the first one, to force a specific codepath
                             pk=['id2', 'id1'],
                             nullable=['id2'],
                             )

    fields = ['id1', 'id2', 'val1', 'val2', 'nval']
    setup(test_blob_update, blob_values1, fields)
    print("Insert test")
    verify(blob_values1)

    t = TableUpdate(test_blob_update, rhnSQL)

    rows = [0, 2, 4, 5]
    values = _build_update_hash(fields, blob_values2, rows)

    t.query(values)
    rhnSQL.commit()

    print("Updates test")
    verify(blob_values2)
Example #5
 def update_client_message_received(self, jid):
     jid = str(jid)
     state_id = self._get_push_state_id('online')
     h = rhnSQL.prepare(self._query_update_client_message_received)
     ret = h.execute(jid=jid, state_id=state_id)
     if ret:
         rhnSQL.commit()
Example #6
    def handler(self, req):
        ret = basePackageUpload.BasePackageUpload.handler(self, req)
        if ret != apache.OK:
            return ret

        a_pkg = rhnPackageUpload.save_uploaded_package(req,
                                                       (self.package_name, None, self.package_version,
                                                        self.package_release, self.package_arch),
                                                       str(self.org_id),
                                                       self.packaging,
                                                       self.file_checksum_type, self.file_checksum)

        self.rel_package_path = rhnPackageUpload.relative_path_from_header(
            a_pkg.header, org_id=self.org_id,
            checksum_type=a_pkg.checksum_type, checksum=a_pkg.checksum)
        self.package_path = os.path.join(CFG.MOUNT_POINT,
                                         self.rel_package_path)

        package_dict, diff_level = rhnPackageUpload.push_package(a_pkg,
                                                                 force=self.force,
                                                                 relative_path=self.rel_package_path, org_id=self.org_id)

        if diff_level:
            return self._send_package_diff(req, diff_level, package_dict)

        # Everything went fine
        rhnSQL.commit()
        reply = "All OK"
        req.headers_out['Content-Length'] = str(len(reply))
        req.send_http_header()
        req.write(reply)
        log_debug(2, "Returning with OK")

        return apache.OK
Example #7
    def _update_package_data(self, crash_id, pkg_data):
        log_debug(1, "_update_package_data: %s, %s" % (crash_id, pkg_data))
        # Older versions of abrt used to store the package info in a single 'package' file
        if pkg_data and 'package' in pkg_data:
            (n, e, v, r) = parseRPMName(pkg_data['package'])
            if not all((n, e, v, r)):
                return 0

            h = rhnSQL.prepare(_query_update_pkg_data1)
            r = h.execute(
                crash_id=crash_id,
                pkg_name=n,
                pkg_epoch=e,
                pkg_version=v,
                pkg_release=r)
            rhnSQL.commit()

            return r

        for item in ['pkg_name', 'pkg_epoch', 'pkg_version', 'pkg_release', 'pkg_arch']:
            if not (item in pkg_data and pkg_data[item]):
                return 0

        h = rhnSQL.prepare(_query_update_pkg_data2)
        r = h.execute(
            crash_id=crash_id,
            pkg_name=pkg_data['pkg_name'],
            pkg_epoch=pkg_data['pkg_epoch'],
            pkg_version=pkg_data['pkg_version'],
            pkg_release=pkg_data['pkg_release'],
            pkg_arch=pkg_data['pkg_arch'])
        rhnSQL.commit()

        return r
Example #8
    def test_new_server_token_1(self):
        "test registration with token"
        u, _ = self._create_new_user()
        org_id = u.contact['org_id']
        entitlements = self._entitlements
        os_release = "2.1as"

        t = misc_functions.create_activation_key(
            org_id=u.contact['org_id'],
            entitlement_level=entitlements,
            user_id=u.getid(),
            release=os_release
        )

        token = t.get_token()

        params = build_new_system_params_with_token(
            token=token,
            os_release=os_release
        )

        system_id = register_new_system(params)
        rhnSQL.commit()

        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)
Example #9
    def management_remove_channel(self, dict):
        log_debug(1)
        self._get_and_validate_session(dict)

        config_channel = dict.get('config_channel')
        # XXX Validate the namespace

        row = rhnSQL.fetchone_dict(self._query_config_channel_by_label,
                                   org_id=self.org_id, label=config_channel)

        if not row:
            raise rhnFault(4009, "Channel not found")

        delete_call = rhnSQL.Procedure('rhn_config.delete_channel')

        try:
            delete_call(row['id'])
        except rhnSQL.SQLError:
            e = sys.exc_info()[1]
            errno = e.args[0]
            if errno == 2292:
                raise_with_tb(rhnFault(4005, "Cannot remove non-empty channel %s" %
                               config_channel, explain=0), sys.exc_info()[2])
            raise

        log_debug(5, "Removed:", config_channel)
        rhnSQL.commit()
        return ""
Example #10
    def test_new_server_token_2(self):
        "Test registration with token that specifies a base channel"

        # FIXME: the test fails because there's no channel associated with the
        # freshly created Server: rhnServerChannel is not populated by the
        # registration code.

        u, _ = self._create_new_user()
        org_id = u.contact['org_id']
        base_channel = 'rhel-i386-as-3'
        entitlements = self._entitlements
        os_release = "2.1as"

        t = misc_functions.create_activation_key(org_id=u.contact['org_id'],
                                                 entitlement_level=entitlements, user_id=u.getid(),
                                                 channels=[base_channel], release=os_release)

        token = t.get_token()

        params = build_new_system_params_with_token(
            token=token,
            os_release=os_release
        )

        system_id = register_new_system(params)
        rhnSQL.commit()

        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)

        server_id = s.getid()
        channels = rhnChannel.channels_for_server(server_id)
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['label'], base_channel)
Example #11
    def test_new_server_1(self):
        "Test normal server registration, with username/password"
        u, password = self._create_new_user()
        username = u.contact['login']
        org_id = u.contact['org_id']
        entitlements = self._entitlements
        os_release = "2.1as"

        t = misc_functions.create_activation_key(
            org_id=u.contact['org_id'],
            entitlement_level=entitlements,
            user_id=u.getid(),
            release=os_release
        )

        params = build_new_system_params_with_username(username=username,
                                                       password=password, os_release=os_release)

        system_id = register_new_system(params)
        rhnSQL.commit()

        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)

        server_id = s.getid()
        channels = rhnChannel.channels_for_server(server_id)
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['label'], self._channel)
Example #12
def store_rhnCryptoKey(description, caCert, verbosity=0):
    """ stores CA cert in rhnCryptoKey
        uses:
            _checkCertMatch_rhnCryptoKey
            _delete_rhnCryptoKey - not currently used
            _insertPrep_rhnCryptoKey
            _lobUpdate_rhnCryptoKey
    """

    org_ids = get_all_orgs()
    for org_id in org_ids:
        org_id = org_id['id']
        try:
            ## look for a cert match in the database
            rhn_cryptokey_id = _checkCertMatch_rhnCryptoKey(caCert, description,
                                                          org_id, deleteRowYN=1,
                                                          verbosity=verbosity)
            if rhn_cryptokey_id is None:
                # nothing to do - cert matches
                continue
            ## insert into the database
            if rhn_cryptokey_id == -1:
                rhn_cryptokey_id = _insertPrep_rhnCryptoKey(rhn_cryptokey_id,
                                                            description, org_id)
            ## write/update
            _lobUpdate_rhnCryptoKey(rhn_cryptokey_id, caCert)
            rhnSQL.commit()
        except rhnSQL.sql_base.SQLError:
            raise_with_tb(CaCertInsertionError(
                "...the traceback: %s" % fetchTraceback()), sys.exc_info()[2])
Example #13
    def test_execute_rowcount(self):
        """Tests row counts"""
        table_name = "misatest"
        try:
            tables = self._list_tables()
            if not table_name in tables:
                rhnSQL.execute("create table %s (id int, value int)" % table_name)
            else:
                rhnSQL.execute("delete from %s" % table_name)

            insert_statement = rhnSQL.Statement(
                "insert into %s values (:item_id, :value)" % table_name
            )
            h = rhnSQL.prepare(insert_statement)
            ret = h.execute(item_id=1, value=2)
            self.assertEqual(ret, 1)
            ret = h.execute(item_id=2, value=2)
            self.assertEqual(ret, 1)

            delete_statement = rhnSQL.Statement("delete from %s" % table_name)
            h = rhnSQL.prepare(delete_statement)
            ret = h.execute()
            self.assertEqual(ret, 2)
            rhnSQL.commit()
        finally:
            rhnSQL.execute("drop table %s" % table_name)
Example #14
 def save_hardware(self):
     """ wrapper for the Hardware.save_hardware_byid() which requires the sysid """
     ret = self.save_hardware_byid(self.server["id"])
     # this function is primarily called from outside
     # so we have to commit here
     rhnSQL.commit()
     return ret
Example #15
def delete_guests(server_id):
    """
    Callback used after a successful kickstart to remove any guest virtual
    instances, as well as their associated servers.
    """
    # First delete all the guest server objects:
    h = rhnSQL.prepare(_query_lookup_guests_for_host)
    h.execute(server_id=server_id)
    delete_server = rhnSQL.Procedure("delete_server")
    log_debug(4, "Deleting guests")
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        guest_id = row['virtual_system_id']
        log_debug(4, 'Deleting guest server: %s' % guest_id)
        try:
            if guest_id is not None:
                delete_server(guest_id)
        except rhnSQL.SQLError:
            log_error("Error deleting server: %s" % guest_id)

    # Finally delete all the virtual instances:
    log_debug(4, "Deleting all virtual instances for host")
    h = rhnSQL.prepare(_query_delete_virtual_instances)
    h.execute(server_id=server_id)

    # Commit all changes:
    try:
        rhnSQL.commit()
    except rhnSQL.SQLError:
        e = sys.exc_info()[1]
        log_error("Error committing transaction: %s" % e)
        rhnSQL.rollback()
Example #16
 def save_packages(self, schedule=1):
     """ wrapper for the Packages.save_packages_byid() which requires the sysid """
     ret = self.save_packages_byid(self.server["id"], schedule=schedule)
     # this function is primarily called from outside
     # so we have to commit here
     rhnSQL.commit()
     return ret
Example #17
    def management_create_channel(self, dict):
        log_debug(1)
        self._get_and_validate_session(dict)

        config_channel = dict.get('config_channel')
        # XXX Validate the namespace

        config_channel_name = dict.get('config_channel_name') or config_channel
        config_channel_description = dict.get('description') or config_channel

        row = rhnSQL.fetchone_dict(self._query_lookup_config_channel,
                                   org_id=self.org_id, config_channel=config_channel)
        if row:
            raise rhnFault(4010, "Configuration channel %s already exists" %
                           config_channel, explain=0)

        insert_call = rhnSQL.Function('rhn_config.insert_channel',
                                      rhnSQL.types.NUMBER())
        config_channel_id = insert_call(self.org_id,
                                        'normal',
                                        config_channel_name,
                                        config_channel,
                                        config_channel_description)

        rhnSQL.commit()
        return {}
Example #18
 def update_client_message_sent(self, jid):
     jid = str(jid)
     h = rhnSQL.prepare(self._query_update_client_message_sent)
     delta = 10
     ret = h.execute(delta=delta, jid=jid)
     if ret:
         rhnSQL.commit()
Example #19
    def update_crash_count(self, system_id, crash, crash_count):
        self.auth_system(system_id)

        log_debug(1, self.server_id, "Updating crash count for %s to %s" % (crash, crash_count))

        server_org_id = self.server.server['org_id']
        server_crash_dir = get_crash_path(str(server_org_id), str(self.server_id), crash)
        if not server_crash_dir:
            log_debug(1, self.server_id, "Error composing crash directory path")
            raise rhnFault(5002)

        h = rhnSQL.prepare(_query_update_crash_count)
        r = h.execute(
            crash_count=crash_count,
            server_id=self.server_id,
            crash=crash)
        rhnSQL.commit()

        if r == 0:
            log_debug(1, self.server_id, "No record for crash: %s" % crash)
            raise rhnFault(5005, "Invalid crash name: %s" % crash)

        absolute_dir = os.path.join(CFG.MOUNT_POINT, server_crash_dir)
        absolute_file = os.path.join(absolute_dir, 'count')

        log_debug(1, self.server_id, "Updating crash count file: %s" % absolute_file)
        f = open(absolute_file, 'w+')
        f.write(crash_count)
        f.close()

        return 1
Example #20
    def main(self):
        parser = OptionParser(option_list=options_table)

        (self.options, _args) = parser.parse_args()

        rhnSQL.initDB()

        self._channels_hash = self._get_channels()

        package_ids = self._get_packages()
        if package_ids is None:
            return 1

        if self.options.backup_file:
            self._backup_packages(package_ids, self.options.backup_file)

        try:
            self._add_package_header_values(package_ids)
        except:
            rhnSQL.rollback()
            raise

        if self.options.commit:
            print "Commiting work"
            rhnSQL.commit()
        else:
            print "Rolling back"
            rhnSQL.rollback()
Example #21
    def update_uuid(self, uuid, commit=1):
        log_debug(3, uuid)
        # XXX Should determine a way to do this dynamically
        uuid_col_length = 36
        if uuid is not None:
            uuid = str(uuid)
        if not uuid:
            log_debug('Nothing to do')
            return

        uuid = uuid[:uuid_col_length]
        server_id = self.server['id']
        log_debug(4, "Trimmed uuid", uuid, server_id)

        # Update this server's UUID (unique client identifier)
        h = rhnSQL.prepare(self._query_update_uuid)
        ret = h.execute(server_id=server_id, uuid=uuid)
        log_debug(4, "execute returned", ret)

        if ret != 1:
            # Row does not exist, have to create it
            h = rhnSQL.prepare(self._query_insert_uuid)
            h.execute(server_id=server_id, uuid=uuid)

        if commit:
            rhnSQL.commit()
Example #22
def find_or_create_channel_arch(name, label):
    lookup = """
       SELECT id from rhnChannelArch
        WHERE label='%s' AND name = '%s'
    """ % (label, name)
    h = rhnSQL.prepare(lookup)
    h.execute()
    row = h.fetchone_dict()
    if row:
        return row['id']

    query_create = """
       INSERT INTO  rhnChannelArch
              (id, arch_type_id, label, name)
       VALUES (sequence_nextval('rhn_channel_arch_id_seq'), :arch_type_id, :label, :name)

    """
    arch_type_id = find_or_create_arch_type(name = name, label = label)
    h = rhnSQL.prepare(query_create)
    try:
        h.execute(
            arch_type_id = arch_type_id,
            label        = label,
            name         = name
        )
        rhnSQL.commit()
    except rhnSQL.SQLError:
        # if we're here that means we're violating something
        raise
Example #23
def update_push_client_jid(server_id, jid):
    h1 = rhnSQL.prepare(_query_delete_duplicate_client_jids)
    h1.execute(server_id=server_id, jid=jid)
    h2 = rhnSQL.prepare(_query_update_push_client_jid)
    h2.execute(server_id=server_id, jid=jid)
    rhnSQL.commit()
    return jid
Example #24
def _delete_rpm_group(packageIds):

    references = [
        'rhnChannelPackage',
        'rhnErrataPackage',
        'rhnErrataPackageTMP',
        'rhnPackageChangelogRec',
        'rhnPackageConflicts',
        'rhnPackageFile',
        'rhnPackageObsoletes',
        'rhnPackageProvides',
        'rhnPackageRequires',
        'rhnPackageRecommends',
        'rhnPackageSuggests',
        'rhnPackageSupplements',
        'rhnPackageEnhances',
        'rhnPackageBreaks',
        'rhnPackagePredepends',
        'rhnServerNeededCache',
    ]
    deleteStatement = "delete from %s where package_id = :package_id"
    for table in references:
        h = rhnSQL.prepare(deleteStatement % table)
        count = h.executemany(package_id=packageIds)
        log_debug(3, "Deleted from %s: %d rows" % (table, count))
    deleteStatement = "delete from rhnPackage where id = :package_id"
    h = rhnSQL.prepare(deleteStatement)
    count = h.executemany(package_id=packageIds)
    if count:
        log_debug(2, "DELETED package id %s" % str(packageIds))
    else:
        log_error("No such package id %s" % str(packageIds))
    rhnSQL.commit()
Example #25
 def _register_dispatcher(self, jabber_id, hostname, port):
     h = rhnSQL.prepare(self._query_update_register_dispatcher)
     rowcount = h.execute(jabber_id_in=jabber_id, hostname_in=hostname, port_in=port, password_in=self._password)
     if not rowcount:
         h = rhnSQL.prepare(self._query_insert_register_dispatcher)
         h.execute(jabber_id_in=jabber_id, hostname_in=hostname, port_in=port, password_in=self._password)
     rhnSQL.commit()
Example #26
    def _repodata_taskomatic(self, file_name):
        log_debug(3, 'repodata', file_name)

        content_type = "application/x-gzip"

        if file_name in ["repomd.xml", "comps.xml"]:
            content_type = "text/xml"
        elif file_name not in ["primary.xml.gz", "other.xml.gz",
                               "filelists.xml.gz", "updateinfo.xml.gz", "Packages.gz"]:
            log_debug(2, "Unknown repomd file requested: %s" % file_name)
            raise rhnFault(6)

        # XXX this won't be repconned or CDNd
        if file_name == "comps.xml":
            return self._repodata_python(file_name)

        file_path = "%s/%s/%s" % (CFG.REPOMD_PATH_PREFIX, self.channelName, file_name)
        rhnFlags.set('Content-Type', content_type)
        try:
            rhnFlags.set('Download-Accelerator-Path', file_path)
            return self._getFile(CFG.REPOMD_CACHE_MOUNT_POINT + "/" + file_path)
        except IOError:
            e = sys.exc_info()[1]
            # For file not found, queue up a regen, and return 404
            if e.errno == 2 and file_name != "comps.xml":
                taskomatic.add_to_repodata_queue(self.channelName,
                                                 "repodata request", file_name, bypass_filters=True)
                rhnSQL.commit()
                # This returns 404 to the client
                raise_with_tb(rhnFault(6), sys.exc_info()[2])
            raise
Example #27
    def test_lobs(self):
        new_id = rhnSQL.Sequence('misatestlob_id_seq').next()
        h = rhnSQL.prepare("""
            insert into misatestlob (id, val) values (:id, empty_blob())
        """)
        h.execute(id=new_id)

        h = rhnSQL.prepare("""
            select val from misatestlob where id = :id for update of val
        """)
        h.execute(id=new_id)
        row = h.fetchone_dict()
        self.assertNotEqual(row, None)
        lob = row['val']
        s = ""
        for i in range(256):
            s = s + chr(i)
        lob.write(s)
        rhnSQL.commit()

        h = rhnSQL.prepare("""
            select val from misatestlob where id = :id
        """)
        h.execute(id=new_id)
        row = h.fetchone_dict()
        self.assertNotEqual(row, None)
        lob = row['val']
        data = rhnSQL.read_lob(lob)
        self.assertEqual(data, s)
Example #28
    def client_set_namespaces(self, systemid, namespaces):
        self.auth_system(systemid)

        server_id = self.server.getid()
        org_id = self.server.server['org_id']

        h = rhnSQL.prepare("""
            delete from rhnServerConfigChannel where server_id = :server_id
        """)
        h.execute(server_id=server_id)

        h = rhnSQL.prepare("""
            insert into rhnServerConfigChannel (server_id, config_channel_id, position)
            select :server_id, id, :position
              from rhnConfigChannel
             where name = :config_channel
               and org_id = :org_id
        """)

        position = 0
        for config_channel in namespaces:
            rowcount = h.execute(server_id=server_id, position=position,
                                 config_channel=config_channel, org_id=org_id)
            if not rowcount:
                raise rhnFault(4009, "Unable to find config channel %s" %
                               config_channel, explain=0)
            position = position + 1

        rhnSQL.commit()
        return 0
Example #29
    def install_missing_product_packages(self):
        '''
        Find missing products and schedule an action to install them
        '''
        h = rhnSQL.prepare(self._query_product_packages)
        package_names = {}
        h.execute(server_id=self.server['id'])
        while True:
            row = h.fetchone_dict()
            if not row:
                break
            pn_id = row['name_id']
            pa_id = row['arch_id']
            package_names[(pn_id, pa_id)] = row['name']

        if not package_names:
            return None

        package_arch_ids = package_names.keys()

        action_id = rhnAction.schedule_server_packages_update_by_arch(self.server['id'],
                                                                      package_arch_ids,
                                                                      org_id=self.server['org_id'],
                                                                      action_name="Product Package Auto-Install")
        for p in package_names.values():
            log_debug(1, "Scheduled for install:  '%s'" % p)

        rhnSQL.commit()

        return action_id
Example #30
def find_or_create_arch_type(name, label):
    lookup = """
       SELECT id from rhnArchType
        WHERE label='%s' AND name = '%s'
    """ % (label, name)
    h = rhnSQL.prepare(lookup)
    h.execute()
    row = h.fetchone_dict()
    if row:
        return row['id']

    query_create = """
       INSERT INTO  rhnArchType
              (id, label, name)
       VALUES (sequence_nextval('rhn_archtype_id_seq'), :label, :name)

    """
    h = rhnSQL.prepare(query_create)
    try:
        h.execute(
            label=label,
            name=name
        )
        rhnSQL.commit()
    except rhnSQL.SQLError:
        e = sys.exc_info()[1]
        # if we're here that means we're violating something
        raise

    return find_or_create_arch_type(name, label)
Example #31
    def test_new_server_token_1(self):
        "test registration with token"
        u, _ = self._create_new_user()
        org_id = u.contact['org_id']
        entitlements = self._entitlements
        os_release = "2.1as"

        t = misc_functions.create_activation_key(
            org_id=u.contact['org_id'],
            entitlement_level=entitlements,
            user_id=u.getid(),
            release=os_release)

        token = t.get_token()

        params = build_new_system_params_with_token(token=token,
                                                    os_release=os_release)

        system_id = register_new_system(params)
        rhnSQL.commit()

        s = rhnServer.get(system_id)
        self.assertNotEqual(s, None)
Example #32
    def __remove_errata(errata_id, advisory):
        """ Remove an errata. """

        channel_ids = errata_helper.channelsWithErrata(errata_id)

        for channel_id in channel_ids:
            _printLog("Removing '{0}' patch from channel '{1}'".format(
                advisory, channel_id))

            # delete errata from channel
            errata_helper.deleteChannelErrata(errata_id, channel_id)

            # Update the errata/package cache for the servers
            # use procedure rhn_channel.update_needed_cache(channel_id)
            log_debug(
                2, "Update Server Cache for channel '{0}'".format(channel_id))
            rhnSQL.commit()
            update_needed_cache = rhnSQL.Procedure(
                "rhn_channel.update_needed_cache")
            update_needed_cache(channel_id)
            rhnSQL.commit()

        errata_helper.deleteErrata(errata_id)
Example #33
    def _repodata_taskomatic(self, file_name):
        log_debug(3, 'repodata', file_name)

        content_type = "application/x-gzip"

        if file_name in ["repomd.xml", "comps.xml"]:
            content_type = "text/xml"
        elif file_name not in [
                "primary.xml.gz", "other.xml.gz", "filelists.xml.gz",
                "updateinfo.xml.gz", "Packages.gz"
        ]:
            log_debug(2, "Unknown repomd file requested: %s" % file_name)
            raise rhnFault(6)

        # XXX this won't be repconned or CDNd
        if file_name == "comps.xml":
            return self._repodata_python(file_name)

        file_path = "%s/%s/%s" % (CFG.REPOMD_PATH_PREFIX, self.channelName,
                                  file_name)
        rhnFlags.set('Content-Type', content_type)
        try:
            rhnFlags.set('Download-Accelerator-Path', file_path)
            return self._getFile(CFG.REPOMD_CACHE_MOUNT_POINT + "/" +
                                 file_path)
        except IOError:
            e = sys.exc_info()[1]
            # For file not found, queue up a regen, and return 404
            if e.errno == 2 and file_name != "comps.xml":
                taskomatic.add_to_repodata_queue(self.channelName,
                                                 "repodata request",
                                                 file_name,
                                                 bypass_filters=True)
                rhnSQL.commit()
                # This returns 404 to the client
                raise_with_tb(rhnFault(6), sys.exc_info()[2])
            raise
Example #34
def transaction(context=None):
    """
    Keeping writes to spacewalk as DRY as possible,
    and making sure that commit or rollback is always
    called as appropriate.
    """

    # TODO: Create proper class.
    #       Once you start adding nested functions to
    #       a 'simple' context manager, the time has come
    #       to just create that class instead of using the
    #       decorator.
    def log_exception(error):
        if context is not None:
            logger.exception("%s: %s", error, context)
        else:
            logger.exception("Unhandled exception: %s", error)

    def rollback():
        logger.warning("Rolling back rhnSQL transaction.")
        rhnSQL.rollback()

    try:
        yield
        rhnSQL.commit()
    except rhnFault as fault:
        logger.exception("Server fault caught: %s", fault)
        rollback()
    except Exception as error:
        msg = str(error)
        if msg == "Unknown action type salt.job" or \
           msg.find('rhnactionsaltjob') != -1:
            logger.error("Saltstack not fully supported by this system:\n%s.",
                         msg)
        else:
            log_exception(error)
        rollback()
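The generator above reads like the body of a contextlib context manager. A minimal usage sketch follows, assuming the function is wrapped with contextlib.contextmanager (the decorator itself is not shown in the snippet) and that rhnSQL.initDB() has already been called; the helper name below is purely illustrative:

# Usage sketch (assumptions: `transaction` is exposed as a context manager
# via contextlib.contextmanager, and the rhnSQL connection is initialized).
def remove_server_config_channels(server_id):
    # Statements issued inside the block share one transaction:
    # rhnSQL.commit() runs on a clean exit, rhnSQL.rollback() runs
    # (with logging) if an exception escapes.
    with transaction(context="remove_server_config_channels"):
        h = rhnSQL.prepare(
            "delete from rhnServerConfigChannel where server_id = :server_id")
        h.execute(server_id=server_id)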
Example #35
    def test_new_channel_1(self):
        """Tests new channel creation"""
        cf = rhnChannel.ChannelFamily()
        cf.load_from_dict(self._new_channel_family_dict())
        cf.save()

        label = cf.get_label()
        vdict = self._new_channel_dict(label=label, channel_family=label)

        c = rhnChannel.Channel()
        for k, v in list(vdict.items()):
            method = getattr(c, "set_" + k)
            method(v)
        c.save()
        channel_id = c.get_id()

        c = rhnChannel.Channel()
        c.load_by_label(label)
        for k, v in list(vdict.items()):
            method = getattr(c, "get_" + k)
            dbv = method()
            self.assertEqual(v, dbv)
        rhnSQL.commit()
        return c
Example #36
    def handler(self, req):
        ret = basePackageUpload.BasePackageUpload.handler(self, req)
        if ret != apache.OK:
            return ret

        a_pkg = rhnPackageUpload.save_uploaded_package(
            req, (self.package_name, None, self.package_version,
                  self.package_release, self.package_arch), str(self.org_id),
            self.packaging, self.file_checksum_type, self.file_checksum)

        self.rel_package_path = rhnPackageUpload.relative_path_from_header(
            a_pkg.header,
            org_id=self.org_id,
            checksum_type=a_pkg.checksum_type,
            checksum=a_pkg.checksum)
        self.package_path = os.path.join(CFG.MOUNT_POINT,
                                         self.rel_package_path)

        package_dict, diff_level = rhnPackageUpload.push_package(
            a_pkg,
            force=self.force,
            relative_path=self.rel_package_path,
            org_id=self.org_id)

        if diff_level:
            return self._send_package_diff(req, diff_level, package_dict)

        # Everything went fine
        rhnSQL.commit()
        reply = "All OK"
        req.headers_out['Content-Length'] = str(len(reply))
        req.send_http_header()
        req.write(reply)
        log_debug(2, "Returning with OK")

        return apache.OK
Example #37
    def push_file(self, config_channel_id, file):
        try:
            result = self._push_file(config_channel_id, file)
        except ConfigFilePathIncomplete:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(4015, "Full path of file '%s' must be specified" % e.file.get('path'),
                          explain=0), sys.exc_info()[2])

        except ConfigFileExistsError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(4013, "File %s already uploaded" % e.file.get('path'),
                          explain=0), sys.exc_info()[2])
        except ConfigFileVersionMismatchError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(4012, "File %s uploaded with a different "
                           "version" % e.file.get('path'), explain=0), sys.exc_info()[2])
        except ConfigFileMissingDelimError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(4008, "Delimiter not specified for file %s" %
                           e.file.get('path'), explain=0), sys.exc_info()[2])
        except ConfigFileMissingContentError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(4007, "No content sent for file %s" %
                           e.file.get('path'), explain=0), sys.exc_info()[2])
        except ConfigFileExceedsQuota:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(4014, "File size of %s exceeds free quota space" %
                           e.file.get('path'), explain=0), sys.exc_info()[2])
        except ConfigFileTooLargeError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(4003, "File size of %s larger than %s bytes" %
                                         (e.file.get('path'), self._get_maximum_file_size()),
                                   explain=0), sys.exc_info()[2])

        rhnSQL.commit()
        return result
Example #38
    def create_crash(self, system_id, crash_data, pkg_data):
        self.auth_system(system_id)
        log_debug(1, self.server_id, crash_data, pkg_data)

        self._check_crash_reporting_setting()

        if not ('crash' in crash_data and 'path' in crash_data) or \
           not (crash_data['crash'] and crash_data['path']):
            log_debug(1, self.server_id, "The crash information is invalid or incomplete: %s" % str(crash_data))
            raise rhnFault(5000)

        server_org_id = self.server.server['org_id']
        server_crash_dir = get_crash_path(str(server_org_id), str(self.server_id), crash_data['crash'])
        if not server_crash_dir:
            log_debug(1, self.server_id, "Error composing crash directory path")
            raise rhnFault(5002)

        crash_id = self._get_crash_id(self.server_id, crash_data['crash'])
        log_debug(1, "crash_id: %s" % crash_id)

        if (crash_id is None):
            if 'count' not in crash_data:
                crash_data['count'] = 1

            h = rhnSQL.prepare(_query_create_crash)
            h.execute(
                server_id = self.server_id,
                crash = crash_data['crash'],
                path = crash_data['path'],
                crash_count = crash_data['count'],
                storage_path = server_crash_dir)
            rhnSQL.commit()
            self._update_package_data(self._get_crash_id(self.server_id, crash_data['crash']), pkg_data)
            return 1
        else:
            return 0
Example #39
def find_or_create_arch_type(name, label):
    lookup = """
       SELECT id from rhnArchType
        WHERE label='%s' AND name = '%s'
    """ % (label, name)
    h = rhnSQL.prepare(lookup)
    h.execute()
    row = h.fetchone_dict()
    if row:
        return row['id']

    query_create = """
       INSERT INTO  rhnArchType
              (id, label, name)
       VALUES (sequence_nextval('rhn_archtype_id_seq'), :label, :name)

    """
    h = rhnSQL.prepare(query_create)
    try:
        h.execute(label=label, name=name)
        rhnSQL.commit()
    except rhnSQL.SQLError:
        # if we're here that means we're violating something
        raise
Example #40
def create_activation_key(org_id=None,
                          user_id=None,
                          groups=None,
                          channels=None,
                          entitlement_level=None,
                          note=None,
                          server_id=None,
                          release=None):
    if org_id is None:
        need_user = 1
        org_id = create_new_org()
    else:
        need_user = 0

    if user_id is None:
        if need_user:
            u = create_new_user(org_id=org_id)
            user_id = u.getid()
    else:
        u = rhnUser.User("", "")
        u.reload(user_id)

    if groups is None:
        groups = []
        for i in range(3):
            params = build_server_group_params(org_id=org_id)
            sg = create_server_group(params)
            groups.append(sg.get_id())

    if channels is None:
        channels = ['rhel-i386-as-3-beta', 'rhel-i386-as-2.1-beta']

    channel_arch_id = find_or_create_channel_arch(name="channel - test",
                                                  label="test")

    # ensure channels are created
    for channel_label in channels:
        channel = add_channel(label=channel_label,
                              org_id=org_id,
                              channel_arch_id=channel_arch_id)
        populate_rhn_dist_channel_map(channel_id=channel['id'],
                                      channel_arch_id=channel_arch_id,
                                      org_id=org_id,
                                      release=release)

    if entitlement_level is None:
        entitlement_level = 'provisioning_entitled'

    if note is None:
        note = "Test activation key %d" % int(time.time())

    a = rhnActivationKey.ActivationKey()
    a.set_user_id(user_id)
    a.set_org_id(org_id)
    a.set_entitlement_level(entitlement_level)
    a.set_note(note)
    a.set_server_groups(groups)
    a.set_channels(channels)
    a.set_server_id(server_id)
    a.save()
    rhnSQL.commit()

    return a
Example #41
def main():
    rhnSQL.initDB()

    blob_values1 = [
        # Regular update
        [1, 1, 'value 11', 'value 12', 1],
        [2, 1, 'value 21', 'value 22', 2],
        # Update with one of the primary keys being None
        [3, None, 'value 31', 'value 32', 3],
        [4, None, 'value 41', 'value 42', 4],
        # Test for writing an empty string into the blob
        [5, 5, '', 'value 52', 5],
        # Test for writing a shorter string into the blob
        [6, 6, 'value 61', 'value 62', 6],
    ]
    newval1_1 = 'new value 11'
    newval1_2 = 'new value 12'
    newval3_1 = 'new value 31 ' * 1024
    newval3_2 = 'new value 32' * 2048
    newval5_1 = 'new value 51'
    newval5_2 = ''
    newval6_1 = 'v61'
    newval6_2 = 'v61'
    blob_values2 = blob_values1[:]
    for r in [0, 2, 4, 5]:
        # Copy the old values
        blob_values2[r] = blob_values1[r][:]
    blob_values2[0][2:5] = [newval1_1, newval1_2, 11]
    blob_values2[2][2:5] = [newval3_1, newval3_2, 2]
    blob_values2[4][2:5] = [newval5_1, newval5_2, 33]
    blob_values2[5][2:5] = [newval6_1, newval6_2, 4]

    test_blob_update = Table(
        "test_blob_update",
        fields={
            'id1': DBint(),
            'id2': DBint(),
            'val1': DBblob(),
            'val2': DBblob(),
            'nval': DBint(),
        },
        # Setting the nullable column to be the first one, to force a specific codepath
        pk=['id2', 'id1'],
        nullable=['id2'],
    )

    fields = ['id1', 'id2', 'val1', 'val2', 'nval']
    setup(test_blob_update, blob_values1, fields)
    print("Insert test")
    verify(blob_values1)

    t = TableUpdate(test_blob_update, rhnSQL)

    rows = [0, 2, 4, 5]
    values = _build_update_hash(fields, blob_values2, rows)

    t.query(values)
    rhnSQL.commit()

    print("Updates test")
    verify(blob_values2)
Example #42
 def save_suse_products(self):
     ret = self.save_suse_products_byid(self.server["id"])
     rhnSQL.commit()
     return ret
Example #43
 def save_history(self):
     ret = self.save_history_byid(self.server["id"])
     # this function is primarily called from outside
     # so we have to commit here
     rhnSQL.commit()
     return ret
Example #44
    def sync(self, update_repodata=True):
        """Trigger a reposync"""
        failed_packages = 0
        sync_error = 0
        if not self.urls:
            sync_error = -1
        start_time = datetime.now()
        for (repo_id, url, repo_label) in self.urls:
            log(0, "Repo URL: %s" % url)
            plugin = None

            # If the repository uses a uln:// URL, switch to the ULN plugin, overriding the command-line
            if url.startswith("uln://"):
                self.repo_plugin = self.load_plugin("uln")

            # pylint: disable=W0703
            try:
                if repo_label:
                    repo_name = repo_label
                else:
                    # use modified relative_url as name of repo plugin, because
                    # it used as name of cache directory as well
                    relative_url = '_'.join(url.split('://')[1].split('/')[1:])
                    repo_name = relative_url.replace("?", "_").replace(
                        "&", "_").replace("=", "_")

                plugin = self.repo_plugin(url,
                                          repo_name,
                                          org=str(self.org_id or ''),
                                          channel_label=self.channel_label)

                if update_repodata:
                    plugin.clear_cache()

                if repo_id is not None:
                    keys = rhnSQL.fetchall_dict("""
                        select k1.key as ca_cert, k2.key as client_cert, k3.key as client_key
                        from rhncontentsource cs inner join
                             rhncontentsourcessl csssl on cs.id = csssl.content_source_id inner join
                             rhncryptokey k1 on csssl.ssl_ca_cert_id = k1.id left outer join
                             rhncryptokey k2 on csssl.ssl_client_cert_id = k2.id left outer join
                             rhncryptokey k3 on csssl.ssl_client_key_id = k3.id
                        where cs.id = :repo_id
                        """,
                                                repo_id=int(repo_id))
                    if keys:
                        ssl_set = get_single_ssl_set(
                            keys, check_dates=self.check_ssl_dates)
                        if ssl_set:
                            plugin.set_ssl_options(ssl_set['ca_cert'],
                                                   ssl_set['client_cert'],
                                                   ssl_set['client_key'])
                        else:
                            raise ValueError(
                                "No valid SSL certificates were found for repository."
                            )

                if not self.no_packages:
                    ret = self.import_packages(plugin, repo_id, url)
                    failed_packages += ret
                    self.import_groups(plugin, url)

                if not self.no_errata:
                    self.import_updates(plugin, url)

                # only for repos obtained from the DB
                if self.sync_kickstart and repo_label:
                    try:
                        self.import_kickstart(plugin, repo_label)
                    except:
                        rhnSQL.rollback()
                        raise
            except Exception:
                e = sys.exc_info()[1]
                log2(0, 0, "ERROR: %s" % e, stream=sys.stderr)
                log2disk(0, "ERROR: %s" % e)
                # pylint: disable=W0104
                sync_error = -1
            if plugin is not None:
                plugin.clear_ssl_cache()
        # Update cache with package checksums
        rhnCache.set(checksum_cache_filename, self.checksum_cache)
        if self.regen:
            taskomatic.add_to_repodata_queue_for_channel_package_subscription(
                [self.channel_label], [], "server.app.yumreposync")
            taskomatic.add_to_erratacache_queue(self.channel_label)
        self.update_date()
        rhnSQL.commit()

        # update permissions
        fileutils.createPath(os.path.join(
            CFG.MOUNT_POINT,
            'rhn'))  # if the directory exists update ownership only
        for root, dirs, files in os.walk(os.path.join(CFG.MOUNT_POINT, 'rhn')):
            for d in dirs:
                fileutils.setPermsPath(os.path.join(root, d), group='apache')
            for f in files:
                fileutils.setPermsPath(os.path.join(root, f), group='apache')
        elapsed_time = datetime.now() - start_time
        log(
            0, "Sync of channel completed in %s." %
            str(elapsed_time).split('.')[0])
        # if there is no global problems, but some packages weren't synced
        if sync_error == 0 and failed_packages > 0:
            sync_error = failed_packages
        return elapsed_time, sync_error
Example #45
    def get(self, system_id, version=1, status={}):
        # Authenticate the system certificate
        if CFG.DISABLE_CHECKINS:
            self.update_checkin = 0
        else:
            self.update_checkin = 1
        self.auth_system(system_id)
        log_debug(1, self.server_id, version,
                  "checkins %s" % ["disabled", "enabled"][self.update_checkin])
        if status:
            self.__update_status(status)

        # Update the capabilities list
        rhnCapability.update_client_capabilities(self.server_id)

        # Invalidate failed actions
        self._invalidate_failed_prereq_actions()

        server_locked = self.server.server_locked()
        log_debug(3, "Server locked", server_locked)

        if self.__reboot_in_progress():
            log_debug(3, "Server reboot in progress", self.server_id)
            rhnSQL.commit()
            return ""

        ret = {}
        # get the action. Status codes are currently:
        # 0 Queued # 1 Picked Up # 2 Completed # 3 Failed
        # XXX: we should really be using labels from rhnActionType instead of
        #      hard coded type id numbers.
        # We fetch actions whose prerequisites have completed, and actions
        # that don't have prerequisites at all
        h = rhnSQL.prepare(self._query_queue_get)

        should_execute = 1

        # Loop to get a valid action
        # (only one valid action will be dealt with per execution of this function...)
        while 1:
            if should_execute:
                h.execute(server_id=self.server_id)
                should_execute = 0

            # Okay, got an action
            action = h.fetchone_dict()
            if not action:  # No actions available; bail out
                # Don't forget the commit at the end...
                ret = ""
                break
            action_id = action['id']
            log_debug(4, "Checking action %s" % action_id)
            # okay, now we have the action - process it.
            if action['remaining_tries'] < 1:
                log_debug(4, "Action %s picked up too many times" % action_id)
                # We've run out of pickup attempts for this action...
                self.__update_action(action_id, status=3,
                                     message="This action has been picked up multiple times "
                                     "without a successful transaction; "
                                     "this action is now failed for this system.")
                # Invalidate actions that depend on this one
                self._invalidate_child_actions(action_id)
                # keep looking for a good action to process...
                continue

            if server_locked and action['unlocked_only'] == 'Y':
                # This action is locked
                log_debug(4, "server id %s locked for action id %s" % (
                    self.server_id, action_id))
                continue

            try:
                if version == 1:
                    ret = self.__getV1(action)
                else:
                    ret = self.__getV2(action)
            except ShadowAction:  # Action the client should not see
                e = sys.exc_info()[1]
                # Make sure we re-execute the query, so we pick up whatever
                # extra actions were added
                should_execute = 1
                text = e.args[0]
                log_debug(4, "Shadow Action", text)
                self.__update_action(action['id'], 2, 0, text)
                continue
            except InvalidAction:  # This is an invalid action
                e = sys.exc_info()[1]
                # Update its status so it won't bother us again
                text = e.args[0]
                log_debug(4, "Invalid Action", text)
                self.__update_action(action['id'], 3, -99, text)
                continue
            except EmptyAction:
                e = sys.exc_info()[1]
                # this means that we have some sort of internal error
                # which gets reported in the logs. We don't touch the
                # action because this should get fixed on our side.
                log_error("Can not process action data", action, e.args)
                ret = ""
                break
            else:  # all fine
                # Update the status of the action
                h = rhnSQL.prepare("""
                update rhnServerAction
                    set status = 1,
                        pickup_time = current_timestamp,
                        remaining_tries = :tries - 1
                where action_id = :action_id
                  and server_id = :server_id
                """)
                h.execute(action_id=action["id"], server_id=self.server_id,
                          tries=action["remaining_tries"])
                break

        # commit all changes
        rhnSQL.commit()

        return ret
Example #46
 def update_client_message_received(self, jid):
     jid = str(jid)
     state_id = self._get_push_state_id('online')
     h = rhnSQL.prepare(self._query_update_client_message_received)
     ret = h.execute(jid=jid, state_id=state_id)
     rhnSQL.commit()
Example #47
def push_package(a_pkg,
                 org_id=None,
                 force=None,
                 channels=[],
                 relative_path=None):
    """Uploads a package"""

    # First write the package to the filesystem to final location
    try:
        importLib.move_package(a_pkg.payload_stream.name,
                               basedir=CFG.MOUNT_POINT,
                               relpath=relative_path,
                               checksum_type=a_pkg.checksum_type,
                               checksum=a_pkg.checksum,
                               force=1)
    except OSError:
        e = sys.exc_info()[1]
        raise_with_tb(rhnFault(50, "Package upload failed: %s" % e),
                      sys.exc_info()[2])
    except importLib.FileConflictError:
        raise_with_tb(rhnFault(50, "File already exists"), sys.exc_info()[2])
    except:
        raise_with_tb(rhnFault(50, "File error"), sys.exc_info()[2])

    pkg = mpmSource.create_package(a_pkg.header,
                                   size=a_pkg.payload_size,
                                   checksum_type=a_pkg.checksum_type,
                                   checksum=a_pkg.checksum,
                                   relpath=relative_path,
                                   org_id=org_id,
                                   header_start=a_pkg.header_start,
                                   header_end=a_pkg.header_end,
                                   channels=channels)

    batch = importLib.Collection()
    batch.append(pkg)

    backend = SQLBackend()

    if force:
        upload_force = 4
    else:
        upload_force = 0
    importer = packageImport.packageImporter(batch,
                                             backend,
                                             source=a_pkg.header.is_source,
                                             caller="server.app.uploadPackage")
    importer.setUploadForce(upload_force)
    importer.run()

    package = batch[0]
    log_debug(5, "Package diff", package.diff)

    if package.diff and not force and package.diff.level > 1:
        # Packages too different; bail out
        log_debug(1, "Packages too different", package.toDict(), "Level:",
                  package.diff.level)
        pdict = package.toDict()
        orig_path = package['path']
        orig_path = os.path.join(CFG.MOUNT_POINT, orig_path)
        log_debug(4, "Original package", orig_path)

        # MPMs do not store their headers on disk, so we must avoid performing
        # operations which rely on information only contained in the headers
        # (such as header signatures).
        if os.path.exists(orig_path) and a_pkg.header.packaging != 'mpm':
            oh = rhn_pkg.get_package_header(orig_path)
            _diff_header_sigs(a_pkg.header, oh, pdict['diff']['diff'])

        return pdict, package.diff.level

    # Remove any pending scheduled file deletion for this package
    h = rhnSQL.prepare("""
        delete from rhnPackageFileDeleteQueue where path = :path
    """)
    h.execute(path=relative_path)

    if package.diff and not force and package.diff.level:
        # No need to copy it - just the path is modified
        # pkilambi bug#180347
        # case 1: the path exists in the db and the file exists on the file
        #         system - nothing to copy
        # case 2: the file exists on the file system but the path is not in
        #         the db - add the relative path to the db based on the
        #         checksum of the pkg
        # case 3: no file on the file system but the path is in the db - write
        #         the file to the file system
        # case 4: no file on the file system and no path in the db - write both
        orig_path = package['path']
        orig_path = os.path.join(CFG.MOUNT_POINT, orig_path)
        log_debug(3, "Original package", orig_path)

        # the package table is substituted below so the query works for both
        # source and binary rpms
        h_path_sql = """
            select ps.path path
                from %s ps,
                     rhnChecksumView c
            where
                c.checksum = :csum
            and c.checksum_type = :ctype
            and ps.checksum_id = c.id
            and (ps.org_id = :org_id or
                 (ps.org_id is null and :org_id is null)
                )
            """
        if a_pkg.header.is_source:
            h_package_table = 'rhnPackageSource'
        else:
            h_package_table = 'rhnPackage'
        h_path = rhnSQL.prepare(h_path_sql % h_package_table)
        h_path.execute(ctype=a_pkg.checksum_type,
                       csum=a_pkg.checksum,
                       org_id=org_id)

        rs_path = h_path.fetchall_dict()
        path_dict = {}
        if rs_path:
            path_dict = rs_path[0]

        # use .get(): rs_path may come back empty, leaving path_dict without a 'path' key
        if os.path.exists(orig_path) and path_dict.get('path'):
            return {}, 0
        elif not path_dict.get('path'):
            h_upd = rhnSQL.prepare("""
            update rhnpackage
               set path = :path
            where checksum_id = (
                        select id from rhnChecksumView c
                                 where c.checksum = :csum
                                   and c.checksum_type = :ctype)
            """)
            h_upd.execute(path=relative_path,
                          ctype=a_pkg.checksum_type,
                          csum=a_pkg.checksum)

    # commit the transactions
    rhnSQL.commit()
    if not a_pkg.header.is_source:
        # Process Package Key information
        server_packages.processPackageKeyAssociations(a_pkg.header,
                                                      a_pkg.checksum_type,
                                                      a_pkg.checksum)

    if not a_pkg.header.is_source:
        errataCache.schedule_errata_cache_update(importer.affected_channels)

    log_debug(2, "Returning")
    return {}, 0
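
A hedged caller sketch for push_package: a_pkg is assumed to come from the server's package-upload parser and relative_path to have been computed beforehand, so both names are placeholders here; only the branching on the returned diff level is meant to be illustrative.

status, diff_level = push_package(a_pkg,
                                  org_id=org_id,
                                  force=0,
                                  channels=['my-custom-channel'],
                                  relative_path=relative_path)
if diff_level > 1:
    # status is package.toDict() for the already-uploaded, conflicting package
    log_debug(1, "Refusing upload, packages differ at level", diff_level)
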
Ejemplo n.º 48
0
                'S.5....T   /usr/share/rhn/up2date_client/newlilocfg.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/packageList.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/rhnChannel.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/rhnDefines.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/rhnErrata.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/rhnHardware.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/rhnPackageInfo.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/rpcServer.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/rpmSource.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/rpmUtils.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/translate.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/up2date.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/up2dateAuth.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/up2dateBatch.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/up2dateErrors.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/up2dateLog.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/up2dateMessages.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/up2dateUtils.pyc',
                'S.5....T   /usr/share/rhn/up2date_client/wrapper.pyc',
                'S.5.X..T   /usr/share/rhn/up2date_client/wrapperUtils.pyc',
                '.....UG.   /var/spool/up2date',
                '.....UG.   /var/spool/up2date',
            ]],
        ],
    })
except:
    rhnSQL.rollback()
    raise

rhnSQL.commit()
Ejemplo n.º 49
0
def process_package_data():
    if debug:
        log = rhnLog('/var/log/rhn/update-packages.log', 5)

    _get_path_sql = rhnSQL.prepare(_get_path_query)
    _update_package_path = rhnSQL.prepare(_update_pkg_path_query)

    _get_path_sql.execute()
    paths = _get_path_sql.fetchall_dict()

    if not paths:
        # Nothing to change
        return
    if verbose:
        print "Processing %s packages" % len(paths)
    pb = ProgressBar(prompt='standby: ', endTag=' - Complete!', \
                     finalSize=len(paths), finalBarLength=40, stream=sys.stdout)
    pb.printAll(1)
    skip_list = []
    new_ok_list = []
    i = 0
    for path in paths:
        pb.addTo(1)
        pb.printIncrement()
        old_path_nvrea = path['path'].split('/')
        org_id = old_path_nvrea[1]
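        # illustrative layout: path['path'] looks like
        # '<prefix>/<org_id>/.../<name>-<version>-<release>.<arch>.rpm',
        # so [0] is the prefix reused as 'prepend' below, [1] the org id
        # and [-1] the rpm file name parsed into a NEVRA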
        # pylint: disable=W0703
        try:
            nevra = parseRPMFilename(old_path_nvrea[-1])
            if nevra[1] in [None, '']:
                nevra[1] = path['epoch']
        except Exception:
            # probably not an rpm skip
            if debug:
                log.writeMessage("Skipping: %s Not a valid rpm" \
                                  % old_path_nvrea[-1])
            continue
        old_abs_path = os.path.join(CFG.MOUNT_POINT, path['path'])

        checksum_type = path['checksum_type']
        checksum = path['checksum']
        new_path = get_package_path(nevra,
                                    org_id,
                                    prepend=old_path_nvrea[0],
                                    checksum=checksum)
        new_abs_path = os.path.join(CFG.MOUNT_POINT, new_path)

        bad_abs_path = os.path.join(CFG.MOUNT_POINT, \
                   get_package_path(nevra, org_id, prepend=old_path_nvrea[0],
                             omit_epoch = True, checksum=checksum))

        if not os.path.exists(old_abs_path):
            if os.path.exists(new_abs_path):
                new_ok_list.append(new_abs_path)
                if debug:
                    log.writeMessage("File %s already on final path %s" %
                                     (path['path'], new_abs_path))
                old_abs_path = new_abs_path
            elif os.path.exists(bad_abs_path):
                # log is only defined when debug is set, so guard the call
                if debug:
                    log.writeMessage("File %s found on %s" %
                                     (path['path'], bad_abs_path))
                old_abs_path = bad_abs_path
            else:
                skip_list.append(old_abs_path)
                if debug:
                    log.writeMessage("Missing path %s for package %d" %
                                     (old_abs_path, path['id']))
                continue

        # pylint: disable=W0703
        try:
            hdr = rhn_rpm.get_package_header(filename=old_abs_path)
        except Exception, e:
            msg = "Exception occurred when reading package header %s: %s" % \
                (old_abs_path, str(e))
            print msg
            if debug:
                log.writeMessage(msg)
            rhnSQL.commit()
            sys.exit(1)

        if old_abs_path != new_abs_path:
            new_abs_dir = os.path.dirname(new_abs_path)
            # relocate the package on the filer
            if debug:
                log.writeMessage("Relocating %s to %s on filer" \
                           % (old_abs_path, new_abs_path))
            if not os.path.isdir(new_abs_dir):
                os.makedirs(new_abs_dir)
            shutil.move(old_abs_path, new_abs_path)
            # Clean up left overs
            os.removedirs(os.path.dirname(old_abs_path))
            # make the path readable
            os.chmod(new_abs_path, 0644)

        # Update the db paths
        _update_package_path.execute(the_id= path['id'], \
                             new_path = new_path )
        if debug:
            log.writeMessage("query Executed: update rhnPackage %d to %s" \
                               % ( path['id'], new_path ))
        # Process gpg key ids
        server_packages.processPackageKeyAssociations(hdr, checksum_type,
                                                      checksum)
        if debug:
            log.writeMessage("gpg key info updated from %s" % new_abs_path)
        i = i + 1
        # we need to break the transaction into smaller pieces
        if i % 1000 == 0:
            rhnSQL.commit()
Ejemplo n.º 50
0
def store_client_route(server_id):
    """ Stores the route the client took to get to hosted or the Satellite """

    log_debug(5, server_id)

    # get the old routing information for this server_id
    # oldRoute in this format: [(id0, hostname0),  (id1, hostname1),  ...]
    #                           closest to client, ..., closest to server
    h = rhnSQL.prepare("""
        select position,
               proxy_server_id,
               hostname
          from rhnServerPath
         where server_id = :server_id
        order by position
        """)
    h.execute(server_id=server_id)
    oldRoute = h.fetchall_dict() or []
    newRoute = []

    # code block if there *is* routing info in the headers
    # NOTE: X-RHN-Proxy-Auth described in proxy/broker/rhnProxyAuth.py
    if rhnFlags.test('X-RHN-Proxy-Auth'):
        tokens = string.split(rhnFlags.get('X-RHN-Proxy-Auth'), ',')
        tokens = filter(lambda token: token, tokens)

        log_debug(4, "route tokens", tokens)
        # newRoute in this format: [(id0, hostname0),  (id1, hostname1),  ...]
        #                           closest to client, ..., closest to server
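        # e.g. newRoute == [('1000010000', 'proxy-outer.example.com'),
        #                   ('1000010001', 'proxy-inner.example.com')]  (illustrative values)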
        for token in tokens:
            token, hostname = apacheAuth.splitProxyAuthToken(token)
            if hostname is None:
                log_debug(
                    3,
                    "NOTE: Spacewalk Proxy v1.1 detected - route tracking is unsupported"
                )
                newRoute = []
                break
            newRoute.append((token[0], hostname))

        log_debug(4, "newRoute", newRoute)

    if oldRoute == newRoute:
        # Nothing to do here
        # This also catches the case of no routes at all
        return

    if oldRoute:
        # blow away table rhnServerPath entries for server_id
        log_debug(8, 'blow away route-info for %s' % server_id)
        h = rhnSQL.prepare("""
            delete from rhnServerPath where server_id = :server_id
        """)
        h.execute(server_id=server_id)

    if not newRoute:
        log_debug(3, "No new route to add")
        rhnSQL.commit()
        return

    log_debug(8, 'adding route-info entries: %s - %s' % (server_id, newRoute))

    h = rhnSQL.prepare("""
        insert into rhnServerPath
               (server_id, proxy_server_id, position, hostname)
        values (:server_id, :proxy_server_id, :position, :hostname)
    """)
    server_ids = []
    proxy_ids = []
    proxy_hostnames = []
    positions = []
    counter = 0
    for p in newRoute:
        proxy_id, proxy_hostname = p[:2]
        proxy_ids.append(proxy_id)
        proxy_hostnames.append(proxy_hostname)
        server_ids.append(server_id)
        positions.append(counter)
        counter = counter + 1

    log_debug(5, server_ids, proxy_ids, positions, proxy_hostnames)
    h.executemany(server_id=server_ids,
                  proxy_server_id=proxy_ids,
                  position=positions,
                  hostname=proxy_hostnames)

    rhnSQL.commit()
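
For reference, executemany() above is fed parallel lists with one entry per hop; with a hypothetical two-proxy chain they would look like this (all values made up):

server_ids      = [1000010000, 1000010000]
proxy_ids       = [1000010555, 1000010556]
positions       = [0, 1]
proxy_hostnames = ['proxy-outer.example.com', 'proxy-inner.example.com']
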
Ejemplo n.º 51
0
def process_sha256_packages():
    if debug:
        log = rhnLog('/var/log/rhn/update-packages.log', 5)

    _get_sha256_packages_sql = rhnSQL.prepare(_get_sha256_packages_query)
    _get_sha256_packages_sql.execute()
    packages = _get_sha256_packages_sql.fetchall_dict()

    if not packages:
        print "No SHA256 capable packages to process."
        if debug:
            log.writeMessage("No SHA256 capable packages to process.")

        return

    if verbose:
        print "Processing %s SHA256 capable packages" % len(packages)

    pb = ProgressBar(prompt='standby: ', endTag=' - Complete!', \
                     finalSize=len(packages), finalBarLength=40, stream=sys.stdout)
    pb.printAll(1)

    _update_sha256_package_sql = rhnSQL.prepare(_update_sha256_package)
    _update_package_files_sql = rhnSQL.prepare(_update_package_files)

    for package in packages:
        pb.addTo(1)
        pb.printIncrement()

        old_abs_path = os.path.join(CFG.MOUNT_POINT, package['path'])
        if debug and verbose:
            log.writeMessage("Processing package: %s" % old_abs_path)
        temp_file = open(old_abs_path, 'rb')
        header, _payload_stream, _header_start, _header_end = \
                rhnPackageUpload.load_package(temp_file)
        checksum_type = header.checksum_type()
        checksum = getFileChecksum(checksum_type, file_obj=temp_file)

        old_path = package['path'].split('/')
        nevra = parseRPMFilename(old_path[-1])
        org_id = old_path[1]
        new_path = get_package_path(nevra,
                                    org_id,
                                    prepend=old_path[0],
                                    checksum=checksum)
        new_abs_path = os.path.join(CFG.MOUNT_POINT, new_path)

        # Filer content relocation
        try:
            if old_abs_path != new_abs_path:
                if debug:
                    log.writeMessage("Relocating %s to %s on filer" %
                                     (old_abs_path, new_abs_path))

                new_abs_dir = os.path.dirname(new_abs_path)
                if not os.path.isdir(new_abs_dir):
                    os.makedirs(new_abs_dir)

                # link() the old path to the new path
                if not os.path.exists(new_abs_path):
                    os.link(old_abs_path, new_abs_path)
                elif debug:
                    log.writeMessage("File %s already exists" % new_abs_path)

                # Make the new path readable
                os.chmod(new_abs_path, 0644)
        except OSError, e:
            message = "Error when relocating %s to %s on filer: %s" % \
                      (old_abs_path, new_abs_path, str(e))
            print message
            if debug:
                log.writeMessage(message)
            sys.exit(1)

        # Update package checksum in the database
        _update_sha256_package_sql.execute(ctype=checksum_type,
                                           csum=checksum,
                                           path=new_path,
                                           id=package['id'])

        _select_checksum_type_id_sql = rhnSQL.prepare(_select_checksum_type_id)
        _select_checksum_type_id_sql.execute(ctype=checksum_type)
        checksum_type_id = _select_checksum_type_id_sql.fetchone()[0]

        # Update checksum of every single file in a package
        for i, f in enumerate(header['filenames']):
            csum = header['filemd5s'][i]

            # Do not update checksums for directories & links
            if not csum:
                continue

            _update_package_files_sql.execute(ctype_id=checksum_type_id,
                                              csum=csum,
                                              pid=package['id'],
                                              filename=f)

        rhnSQL.commit()

        try:
            if os.path.exists(old_abs_path):
                os.unlink(old_abs_path)
            if os.path.exists(os.path.dirname(old_abs_path)):
                os.removedirs(os.path.dirname(old_abs_path))
        except OSError, e:
            message = "Error when removing %s: %s" % (old_abs_path, str(e))
            print message
            if debug:
                log.writeMessage(message)

            sys.exit(1)
Ejemplo n.º 52
0
def process_package_files():
    def parse_header(header):
        checksum_type = rhn_rpm.RPM_Header(header).checksum_type()
        return mpmSource.create_package(header,
                                        size=0,
                                        checksum_type=checksum_type,
                                        checksum=None,
                                        relpath=None,
                                        org_id=None,
                                        header_start=None,
                                        header_end=None,
                                        channels=[])

    package_name_h = rhnSQL.prepare(package_name_query)

    def package_name(pid):
        package_name_h.execute(pid=pid)
        r = package_name_h.fetchall_dict()[0]
        return "%s-%s.%s" % (r['name'], r['vre'], r['arch'])

    package_repodata_h = rhnSQL.prepare(package_repodata_delete)

    def delete_package_repodata(pid):
        package_repodata_h.execute(pid=pid)

    log = rhnLog('/var/log/rhn/update-packages.log', 5)

    package_query_h = rhnSQL.prepare(package_query)
    package_query_h.execute()

    package_capabilities_h = rhnSQL.prepare(package_capabilities)
    update_packagefile_checksum_h = rhnSQL.prepare(update_packagefile_checksum)
    insert_packagefile_h = rhnSQL.prepare(insert_packagefile)

    while (True):
        row = package_query_h.fetchone_dict()
        if not row:  # No more packages in DB to process
            break

        package_path = os.path.join(CFG.MOUNT_POINT, row['path'])

        if not os.path.exists(package_path):
            if debug:
                log.writeMessage("Package path '%s' does not exist." %
                                 package_path)
            continue

        # pylint: disable=W0703
        try:
            hdr = rhn_rpm.get_package_header(filename=package_path)
        except Exception, e:
            message = "Error when reading package %s header: %s" % (
                package_path, e)
            if debug:
                log.writeMessage(message)
            continue

        pkg_updates = 0
        if row['filecount'] != len(hdr['filenames']):
            # The number of package files on disk does not match the number in
            # the DB (possibly bug #652852). We have to correct them one by one.
            package_capabilities_h.execute(pid=row['id'])
            pkg_caps = {}  # file-name : capabilities dictionary
            for cap in package_capabilities_h.fetchall_dict() or []:
                pkg_caps[cap['name']] = cap

            for f in parse_header(hdr)['files']:
                if pkg_caps.has_key(f['name']):
                    continue  # The package file already exists in the DB

                # Insert the missing package file into DB
                insert_packagefile_h.execute(pid=row['id'],
                                             name=f['name'],
                                             ctype=f['checksum_type'],
                                             csum=f['checksum'],
                                             device=f['device'],
                                             inode=f['inode'],
                                             file_mode=f['file_mode'],
                                             username=f['username'],
                                             groupname=f['groupname'],
                                             rdev=f['rdev'],
                                             file_size=f['file_size'],
                                             mtime=f['mtime'],
                                             linkto=f['linkto'],
                                             flags=f['flags'],
                                             verifyflags=f['verifyflags'],
                                             lang=f['lang'])
                pkg_updates += 1

            if debug and pkg_updates:
                log.writeMessage("Package id: %s, name: %s, %s files inserted" % \
                    (row['id'], package_name(row['id']), pkg_updates))
        elif row['nonnullcsums'] == 0:
            # All package files in the DB have null checksum (possibly a bug #659348)
            package_capabilities_h.execute(pid=row['id'])
            pkg_caps = {}  # file-name : capabilities dictionary
            for cap in package_capabilities_h.fetchall_dict() or []:
                pkg_caps[cap['name']] = cap
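            # pkg_caps now maps file name -> its DB row; illustratively:
            #   {'/usr/bin/foo': {'name': '/usr/bin/foo', 'checksum': 'abc...',
            #                     'package_id': 42, 'capability_id': 7}, ...}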

            for f in parse_header(hdr)['files']:
                if f['checksum'] == '':  # Convert empty string (symlinks) to None to match w/ Oracle returns
                    f['checksum'] = None

                caps = pkg_caps[f['name']]

                if not caps['checksum'] == f['checksum']:
                    # Package file exists, but its checksum in the DB is incorrect
                    update_packagefile_checksum_h.execute(
                        ctype=f['checksum_type'],
                        csum=f['checksum'],
                        pid=caps['package_id'],
                        cid=caps['capability_id'])
                    pkg_updates += 1

            if debug and pkg_updates:
                log.writeMessage("Package id: %s, name: %s, %s checksums updated" % \
                    (row['id'], package_name(row['id']), pkg_updates))

        if pkg_updates:
            log.writeMessage("Package id: %s, purging rhnPackageRepoData" %
                             row['id'])
            delete_package_repodata(row['id'])

        rhnSQL.commit()  # End of a package
Ejemplo n.º 53
0
    def tearDown(self):
        drop_table_query = "DROP TABLE %s" % self.temp_table
        cursor = rhnSQL.prepare(drop_table_query)
        cursor.execute()
        rhnSQL.commit()
Ejemplo n.º 54
0
class ConfigManagement(configFilesHandler.ConfigFilesHandler):
    def __init__(self):
        log_debug(3)
        configFilesHandler.ConfigFilesHandler.__init__(self)
        self.functions.update({
            'management.get_file':
            'management_get_file',
            'management.list_config_channels':
            'management_list_channels',
            'management.create_config_channel':
            'management_create_channel',
            'management.remove_config_channel':
            'management_remove_channel',
            'management.list_file_revisions':
            'management_list_file_revisions',
            'management.list_files':
            'management_list_files',
            'management.has_file':
            'management_has_file',
            'management.put_file':
            'management_put_file',
            'management.remove_file':
            'management_remove_file',
            'management.diff':
            'management_diff',
            'management.get_default_delimiters':
            'management_get_delimiters',
            'management.get_maximum_file_size':
            'management_get_maximum_file_size',
        })
        self.user = None
        self.default_delimiter = '@'

    _query_list_config_channels = rhnSQL.Statement("""
        select cc.name,
               cc.label,
               cct.label channel_type
          from rhnConfigChannelType cct,
               rhnConfigChannel cc
         where cc.org_id = :org_id
           and cc.confchan_type_id = cct.id
           and cct.label = 'normal'
         order by cc.label, cc.name
    """)

    def _get_and_validate_session(self, dict):
        session = dict.get('session')
        self._validate_session(session)

    def management_list_channels(self, dict):
        log_debug(1)
        self._get_and_validate_session(dict)
        return map(
            lambda x: x['label'],
            rhnSQL.fetchall_dict(self._query_list_config_channels,
                                 org_id=self.org_id) or [])

    _query_lookup_config_channel = rhnSQL.Statement("""
        select id
          from rhnConfigChannel
         where org_id = :org_id
           and label = :config_channel
    """)

    def management_create_channel(self, dict):
        log_debug(1)
        self._get_and_validate_session(dict)

        config_channel = dict.get('config_channel')
        # XXX Validate the namespace

        config_channel_name = dict.get('config_channel_name') or config_channel
        config_channel_description = dict.get('description') or config_channel

        row = rhnSQL.fetchone_dict(self._query_lookup_config_channel,
                                   org_id=self.org_id,
                                   config_channel=config_channel)
        if row:
            raise rhnFault(4010,
                           "Configuration channel %s already exists" %
                           config_channel,
                           explain=0)

        insert_call = rhnSQL.Function('rhn_config.insert_channel',
                                      rhnSQL.types.NUMBER())
        config_channel_id = insert_call(self.org_id, 'normal',
                                        config_channel_name, config_channel,
                                        config_channel_description)

        rhnSQL.commit()
        return {}

    _query_config_channel_by_label = rhnSQL.Statement("""
    select id
      from rhnConfigChannel
     where org_id = :org_id
       and label = :label
    """)

    def management_remove_channel(self, dict):
        log_debug(1)
        self._get_and_validate_session(dict)

        config_channel = dict.get('config_channel')
        # XXX Validate the namespace

        row = rhnSQL.fetchone_dict(self._query_config_channel_by_label,
                                   org_id=self.org_id,
                                   label=config_channel)

        if not row:
            raise rhnFault(4009, "Channel not found")

        delete_call = rhnSQL.Procedure('rhn_config.delete_channel')

        try:
            delete_call(row['id'])
        except rhnSQL.SQLError, e:
            errno = e.args[0]
            if errno == 2292:
                raise rhnFault(4005,
                               "Cannot remove non-empty channel %s" %
                               config_channel,
                               explain=0), None, sys.exc_info()[2]
            raise

        log_debug(5, "Removed:", config_channel)
        rhnSQL.commit()
        return ""
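
A hedged sketch of driving two of these handlers directly; the session value and channel label are placeholders, and in production the dict normally arrives via the XML-RPC layer rather than being built by hand:

session_token = 'session-id-from-login'  # placeholder
handler = ConfigManagement()
handler.management_create_channel({
    'session': session_token,          # checked by _get_and_validate_session()
    'config_channel': 'base-config',   # label; name and description default to it
})
handler.management_remove_channel({
    'session': session_token,
    'config_channel': 'base-config',
})
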
Ejemplo n.º 55
0
def token_config_channels(server, tokens_obj):
    assert(isinstance(tokens_obj, ActivationTokens))
    server_id = server['id']

    # If this is a re-registration token, it should not have any config
    # channel associated with it (and no deploy_configs either). We'll just
    # keep whatever config files they had on this profile
    if tokens_obj.is_rereg_token:
        return []

    # Activation key order matters; config channels are stacked in order

    config_channels = []
    config_channels_hash = {}
    deployment = 0
    current_channels = []
    if tokens_obj.forget_rereg_token:
        current_channels = _get_current_config_channels(server_id)

    for token in tokens_obj.tokens:
        channels = _get_token_config_channels(token['token_id'])
        # Check every token used and if any of them are set to not deploy configs
        # then we won't deploy configs for any config channels the system is subscribed to
        deploy_configs = token['deploy_configs']
        log_debug(2, "token_id: ", token['token_id'], " deploy_configs: ", deploy_configs)
        if deploy_configs == 'Y':
            log_debug(2, "At least one token set to deploy config files")
            deployment = 1
        for c in channels:
            config_channel_id = c['config_channel_id']
            if not c['config_channel_id'] in current_channels and\
                    not config_channels_hash.has_key(config_channel_id):
                position = len(current_channels) + len(config_channels) + 1
                # Update the position in the queue
                c['position'] = position
                config_channels.append(c)
                config_channels_hash[config_channel_id] = None

    ret = []
    if config_channels:
        h = rhnSQL.prepare(_query_set_server_config_channels)

        h.execute_bulk({
            'server_id': [server_id] * len(config_channels),
            'config_channel_id': map(lambda c: c['config_channel_id'],
                                     config_channels),
            'position': map(lambda c: c['position'], config_channels),
        })

        for channel in config_channels:
            msg = "Subscribed to config channel %s" % channel['name']
            log_debug(4, msg)
            ret.append(msg)

    # Now that we have the server subscribed to config channels,
    # determine if we have to deploy the files too
    # Don't pass tokens_obj, we only need the token that provided the config
    # channels in the first place
    if deployment:
        log_debug(2, "At least one token has deploy_configs == Y, deploying configs")
        deploy_configs_if_needed(server)

    rhnSQL.commit()

    return ret
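
To illustrate the position stacking above: if the server already has two config channels and the activation keys contribute two new ones, the new rows are appended after the existing ones (channel ids and names here are invented):

current_channels = [101, 102]        # hypothetical channels already on the server
new_channels = [{'config_channel_id': 201, 'name': 'app-config'},
                {'config_channel_id': 202, 'name': 'web-config'}]
for i, c in enumerate(new_channels):
    c['position'] = len(current_channels) + i + 1   # -> 3 and 4
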
Ejemplo n.º 56
0
    def _channelPackageSubscription(self, authobj, info):
        # Authorize the org id passed
        authobj.authzOrg(info)

        packageList = info.get('packages') or []
        if not packageList:
            log_debug(1, "No packages found; done")
            return 0

        if 'channels' not in info or not info['channels']:
            log_debug(1, "No channels found; done")
            return 0

        channelList = info['channels']
        authobj.authzChannels(channelList)

        # Have to turn the channel list into a list of Channel objects
        channelList = [Channel().populate({'label': x}) for x in channelList]

        # Since we're dealing with superusers, we allow them to change the org
        # id
        # XXX check if we don't open ourselves too much (misa 20030422)
        org_id = info.get('orgId')
        if org_id == '':
            org_id = None

        batch = Collection()
        package_keys = ['name', 'version', 'release', 'epoch', 'arch']
        for package in packageList:
            for k in package_keys:
                if k not in package:
                    raise Exception("Missing key %s" % k)
                if k == 'epoch':
                    if package[k] is not None:
                        if package[k] == '':
                            package[k] = None
                        else:
                            package[k] = str(package[k])
                else:
                    package[k] = str(package[k])

            if package['arch'] == 'src' or package['arch'] == 'nosrc':
                # Source package - no reason to continue
                continue
            _checksum_sql_filter = ""
            if 'md5sum' in package:  # for old rhnpush compatibility
                package['checksum_type'] = 'md5'
                package['checksum'] = package['md5sum']

            exec_args = {
                'name': package['name'],
                'pkg_epoch': package['epoch'],
                'pkg_version': package['version'],
                'pkg_rel': package['release'],
                'pkg_arch': package['arch'],
                'orgid': org_id
            }

            if 'checksum' in package and CFG.ENABLE_NVREA:
                _checksum_sql_filter = """and c.checksum = :checksum
                                          and c.checksum_type = :checksum_type"""
                exec_args.update({
                    'checksum_type': package['checksum_type'],
                    'checksum': package['checksum']
                })

            h = rhnSQL.prepare(self._get_pkg_info_query % _checksum_sql_filter)
            h.execute(**exec_args)
            row = h.fetchone_dict()

            package['checksum_type'] = row['checksum_type']
            package['checksum'] = row['checksum']
            package['org_id'] = org_id
            package['channels'] = channelList
            batch.append(IncompletePackage().populate(package))

        caller = "server.app.channelPackageSubscription"

        backend = SQLBackend()
        importer = ChannelPackageSubscription(batch, backend, caller=caller)
        try:
            importer.run()
        except IncompatibleArchError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(50, string.join(e.args), explain=0),
                          sys.exc_info()[2])
        except InvalidChannelError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(50, str(e), explain=0), sys.exc_info()[2])

        affected_channels = importer.affected_channels

        log_debug(3, "Computing errata cache for systems affected by channels",
                  affected_channels)

        schedule_errata_cache_update(affected_channels)
        rhnSQL.commit()

        return 0
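
A hedged sketch of the info structure this handler expects; every value is illustrative, and 'md5sum' is the legacy spelling still accepted from old rhnpush clients:

info = {
    'orgId': 1,
    'channels': ['my-custom-channel'],
    'packages': [{
        'name': 'foo', 'version': '1.0', 'release': '1',
        'epoch': None, 'arch': 'x86_64',
        'checksum_type': 'sha256', 'checksum': 'abc123...',
    }],
}
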
Ejemplo n.º 57
0
    def _set_state(self, jid, state_id):
        h = rhnSQL.prepare(self._query_set_state)
        h.execute(state_id=state_id, jid=str(jid))
        rhnSQL.commit()
Ejemplo n.º 58
0
    def run(self):
        channels = []
        if self.all:
            channels = list(rhnSQL.Table("RHNCHANNEL", "LABEL").keys())
        else:
            channels = [self.channel]

        for c in channels:
            _printLog("Remove old patches in channel '%s'" % c)
            # search errata which ends with channel-* in this channel
            h = rhnSQL.prepare("""
                SELECT e.id as errata_id,
                       e.advisory,
                       e.advisory_rel,
                       c.id as channel_id,
                       ca.label channel_arch_label
                  FROM rhnErrata e
                  JOIN rhnChannelErrata ce ON e.id = ce.errata_id
                  JOIN rhnChannel c ON ce.channel_id = c.id
                  JOIN rhnChannelArch ca ON c.channel_arch_id = ca.id
                 WHERE c.label = :channel
            """)
            h.execute(channel=c)
            patches = h.fetchall_dict() or []
            channel_id = None
            for patch in patches:
                pattern = "-%s-%s-?[0-9]*$" % (patch['advisory_rel'],
                                               patch['channel_arch_label'])
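                # illustrative: with advisory_rel 4 and arch label 'i586' the
                # pattern becomes '-4-i586-?[0-9]*$', matching old-style
                # advisories such as 'somepatch-4-i586' or 'somepatch-4-i586-2'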
                if not re.search(pattern, patch['advisory']):
                    log_debug(
                        2,
                        "Found new style patch '%s'. Skip" % patch['advisory'])
                    # This is not an old style patch. Skip
                    continue
                errata_id = patch['errata_id']
                channel_id = patch['channel_id']
                log_debug(
                    1, "Remove patch '%s(%d)' from channel '%s(%d)'" %
                    (patch['advisory'], errata_id, c, channel_id))

                # delete channel from errata
                errata_helper.deleteChannelErrata(errata_id, channel_id)

                # search if the errata still has channels
                if errata_helper.errataHasChannels(errata_id):
                    # if yes, work on this patch is finished
                    log_debug(2, "Patch exists in other channels too")
                    continue

                # else we can remove the errata completely
                log_debug(2, "Delete patch completely")
                errata_helper.deleteErrata(errata_id)

            # if channel_id is still None, no patches were deleted,
            # so there is no need to run update_needed_cache for this channel
            if channel_id:
                # Update the errata/package cache for the servers
                #        use procedure rhn_channel.update_needed_cache(channel_id)
                log_debug(2, "Update Server Cache for channel '%s'" % c)
                rhnSQL.commit()
                update_needed_cache = rhnSQL.Procedure(
                    "rhn_channel.update_needed_cache")
                update_needed_cache(channel_id)
                rhnSQL.commit()
            else:
                log_debug(1, "No old style patches found in '%s'" % c)

        _printLog("Finished")
Ejemplo n.º 59
0
    def submit(self, system_id, action_id, result, message="", data={}):
        """ Submit the results of a queue run.
            Maps old and new rhn_check behavior to new database status codes

            The new API uses 4 slightly different status codes than the
            old client does.  This function will "hopefully" sensibly
            map them.  Old methodology:
               -rhn_check retrieves an action from the top of the action queue.
               -It attempts to execute the desired action and returns either
                   (a) 0   -- presumed successful.
                   (b) rhnFault object -- presumed failed
                   (c) some other non-fault object -- *assumed* successful.
               -Regardless of result code, action is marked as "executed"

            We try to make a smarter status selection (i.e. failed||completed).

            For reference:
            New DB status codes:      Old DB status codes:
                  0: Queued               0: queued
                  1: Picked Up            1: picked up
                  2: Completed            2: executed
                  3: Failed               3: completed
        """
        if type(action_id) is not IntType:
            # Convert it to int
            try:
                action_id = int(action_id)
            except ValueError:
                log_error("Invalid action_id", action_id)
                raise_with_tb(rhnFault(30, _("Invalid action value type %s (%s)") %
                               (action_id, type(action_id))), sys.exc_info()[2])
        # Authenticate the system certificate
        self.auth_system(system_id)
        log_debug(1, self.server_id, action_id, result)
        # check that the action is valid
        # We have a uniqueness constraint on (action_id, server_id)
        h = rhnSQL.prepare("""
            select at.label action_type,
                   at.trigger_snapshot,
                   at.name
              from rhnServerAction sa,
                   rhnAction a,
                   rhnActionType at
             where sa.server_id = :server_id
               and sa.action_id = :action_id
               and sa.status = 1
               and a.id = :action_id
               and a.action_type = at.id
        """)
        h.execute(server_id=self.server_id, action_id=action_id)
        row = h.fetchone_dict()
        if not row:
            log_error("Server %s does not own action %s" % (
                self.server_id, action_id))
            raise rhnFault(22, _("Action %s does not belong to server %s") % (
                action_id, self.server_id))

        action_type = row['action_type']
        trigger_snapshot = (row['trigger_snapshot'] == 'Y')

        if 'missing_packages' in data:
            missing_packages = "Missing-Packages: %s" % str(
                data['missing_packages'])
            rmsg = "%s %s" % (message, missing_packages)
        elif 'koan' in data:
            rmsg = "%s: %s" % (message, data['koan'])
        else:
            rmsg = message

        rcode = result
        # Careful with this one, result can be a very complex thing
        # and this processing is required for compatibility with old
        # rhn_check clients
        if type(rcode) == type({}):
            if "faultCode" in result:
                rcode = result["faultCode"]
            if "faultString" in result:
                rmsg = result["faultString"] + str(data)
        if type(rcode) in [type({}), type(()), type([])] \
                or type(rcode) is not IntType:
            rmsg = u"%s [%s]" % (UnicodeType(message), UnicodeType(rcode))
            rcode = -1
        # map to db codes.
        status = self.status_for_action_type_code(action_type, rcode)

        if status == 3:
            # Failed action - invalidate children
            self._invalidate_child_actions(action_id)
        elif action_type == 'reboot.reboot':
            # reboot action should stay as pickup
            rhnSQL.commit()
            return 0
        elif status == 2 and trigger_snapshot and self.__should_snapshot():
            # if action status is 'Completed', snapshot if allowed and if needed
            self.server.take_snapshot("Scheduled action completion:  %s" % row['name'])

        self.__update_action(action_id, status, rcode, rmsg)

        # Store the status in a flag - easier than to complicate the action
        # plugin API by adding a status
        rhnFlags.set('action_id', action_id)
        rhnFlags.set('action_status', status)

        self.process_extra_data(self.server_id, action_id, data=data,
                                action_type=action_type)

        # commit, because nobody else will
        rhnSQL.commit()
        return 0
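
An illustration of how legacy client results translate into rcode and rmsg in the block above (all values invented):

#   result == 0                                    -> rcode 0,   rmsg == message
#   result == {'faultCode': -18,
#              'faultString': 'package not found'} -> rcode -18, rmsg == faultString + str(data)
#   result == ['unexpected', 'list']               -> rcode -1,  rmsg == "<message> [<result>]"
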
Ejemplo n.º 60
0
    def import_kickstart(self, plug, repo_label):
        ks_path = 'rhn/kickstart/'
        ks_tree_label = re.sub(r'[^-_0-9A-Za-z@.]', '',
                               repo_label.replace(' ', '_'))
        if len(ks_tree_label) < 4:
            ks_tree_label += "_repo"

        # construct ks_path and check whether we already have this KS tree synced
        id_request = """
                select id
                from rhnKickstartableTree
                where channel_id = :channel_id and label = :label
                """

        if self.org_id:
            ks_path += str(self.org_id) + '/' + ks_tree_label
            # Trees synced from external repositories are expected to have the full path in the database
            db_path = os.path.join(CFG.MOUNT_POINT, ks_path)
            row = rhnSQL.fetchone_dict(id_request + " and org_id = :org_id",
                                       channel_id=self.channel['id'],
                                       label=ks_tree_label,
                                       org_id=self.org_id)
        else:
            ks_path += ks_tree_label
            db_path = ks_path
            row = rhnSQL.fetchone_dict(id_request + " and org_id is NULL",
                                       channel_id=self.channel['id'],
                                       label=ks_tree_label)

        treeinfo_path = ['treeinfo', '.treeinfo']
        treeinfo_parser = None
        for path in treeinfo_path:
            log(1, "Trying " + path)
            treeinfo = plug.get_file(
                path, os.path.join(plug.repo.basecachedir, plug.name))
            if treeinfo:
                try:
                    treeinfo_parser = TreeInfoParser(treeinfo)
                    break
                except TreeInfoError:
                    pass

        if not treeinfo_parser:
            log(0, "Kickstartable tree not detected (no valid treeinfo file)")
            return

        if self.ks_install_type is None:
            family = treeinfo_parser.get_family()
            if family == 'Fedora':
                self.ks_install_type = 'fedora18'
            elif family == 'CentOS':
                self.ks_install_type = 'rhel_' + treeinfo_parser.get_major_version(
                )
            else:
                self.ks_install_type = 'generic_rpm'

        fileutils.createPath(os.path.join(CFG.MOUNT_POINT, ks_path))
        # Make sure images are included
        to_download = set()
        for repo_path in treeinfo_parser.get_images():
            local_path = os.path.join(CFG.MOUNT_POINT, ks_path, repo_path)
            # TODO: better check
            if not os.path.exists(local_path) or self.force_kickstart:
                to_download.add(repo_path)

        if row:
            log(
                0,
                "Kickstartable tree %s already synced. Updating content..." %
                ks_tree_label)
            ks_id = row['id']
        else:
            row = rhnSQL.fetchone_dict("""
                select sequence_nextval('rhn_kstree_id_seq') as id from dual
                """)
            ks_id = row['id']

            rhnSQL.execute("""
                       insert into rhnKickstartableTree (id, org_id, label, base_path, channel_id, kstree_type,
                                                         install_type, last_modified, created, modified)
                       values (:id, :org_id, :label, :base_path, :channel_id,
                                 ( select id from rhnKSTreeType where label = :ks_tree_type),
                                 ( select id from rhnKSInstallType where label = :ks_install_type),
                                 current_timestamp, current_timestamp, current_timestamp)""",
                           id=ks_id,
                           org_id=self.org_id,
                           label=ks_tree_label,
                           base_path=db_path,
                           channel_id=self.channel['id'],
                           ks_tree_type=self.ks_tree_type,
                           ks_install_type=self.ks_install_type)

            log(
                0, "Added new kickstartable tree %s. Downloading content..." %
                ks_tree_label)

        insert_h = rhnSQL.prepare("""
                insert into rhnKSTreeFile (kstree_id, relative_filename, checksum_id, file_size, last_modified, created,
                 modified) values (:id, :path, lookup_checksum('sha256', :checksum), :st_size,
                 epoch_seconds_to_timestamp_tz(:st_time), current_timestamp, current_timestamp)
        """)

        delete_h = rhnSQL.prepare("""
                delete from rhnKSTreeFile where kstree_id = :id and relative_filename = :path
        """)

        # Downloading/Updating content of KS Tree
        # start from root dir
        is_root = True
        dirs_queue = ['']
        log(0, "Gathering all files in kickstart repository...")
        while len(dirs_queue) > 0:
            cur_dir_name = dirs_queue.pop(0)
            cur_dir_html = plug.get_file(cur_dir_name)
            if cur_dir_html is None:
                continue

            blacklist = None
            if is_root:
                blacklist = [treeinfo_parser.get_package_dir() + '/']
                is_root = False

            parser = KSDirParser(cur_dir_html, blacklist)

            for ks_file in parser.get_content():
                repo_path = cur_dir_name + ks_file['name']
                # if this is a directory, just add its name to the queue (BFS-style traversal)
                if ks_file['type'] == 'DIR':
                    dirs_queue.append(repo_path)
                    continue

                if not os.path.exists(
                        os.path.join(CFG.MOUNT_POINT, ks_path,
                                     repo_path)) or self.force_kickstart:
                    to_download.add(repo_path)

        if to_download:
            log(0, "Downloading %d kickstart files." % len(to_download))
            progress_bar = ProgressBarLogger("Downloading kickstarts:",
                                             len(to_download))
            downloader = ThreadedDownloader(force=self.force_kickstart)
            for item in to_download:
                params = {}
                plug.set_download_parameters(
                    params, item, os.path.join(CFG.MOUNT_POINT, ks_path, item))
                downloader.add(params)
            downloader.set_log_obj(progress_bar)
            downloader.run()
            log2disk(0, "Download finished.")
            for item in to_download:
                st = os.stat(os.path.join(CFG.MOUNT_POINT, ks_path, item))
                # update entity about current file in a database
                delete_h.execute(id=ks_id, path=item)
                insert_h.execute(id=ks_id,
                                 path=item,
                                 checksum=getFileChecksum(
                                     'sha256',
                                     os.path.join(CFG.MOUNT_POINT, ks_path,
                                                  item)),
                                 st_size=st.st_size,
                                 st_time=st.st_mtime)
        else:
            log(0, "No new kickstart files to download.")

        # set permissions recursively
        rhnSQL.commit()