Example #1
    def _proxy2server(self):
        hdrs = rhnFlags.get('outputTransportOptions')
        log_debug(3, hdrs)
        size = -1

        # Put the headers into the output connection object
        http_connection = self.responseContext.getConnection()
        for (k, vals) in hdrs.items():
            if k.lower() in ['content_length', 'content-length']:
                try:
                    size = int(vals)
                except ValueError:
                    pass
            if k.lower() in ['content_length', 'content_type']:
                # mod_wsgi modifies incoming headers so we have to transform them back
                k = k.replace('_', '-')
            if not (k.lower()[:2] == 'x-' or
                    k.lower() in [  # all but 'host', and 'via'
                        'accept', 'accept-charset', 'accept-encoding', 'accept-language',
                        'accept-ranges', 'age', 'allow', 'authorization', 'cache-control',
                        'connection', 'content-encoding', 'content-language', 'content-length',
                        'content-location', 'content-md5', 'content-range', 'content-type',
                        'date', 'etag', 'expect', 'expires', 'from', 'if-match',
                        'if-modified-since', 'if-none-match', 'if-range', 'if-unmodified-since',
                        'last-modified', 'location', 'max-forwards', 'pragma', 'proxy-authenticate',
                        'proxy-authorization', 'range', 'referer', 'retry-after', 'server',
                        'te', 'trailer', 'transfer-encoding', 'upgrade', 'user-agent', 'vary',
                        'warning', 'www-authenticate']):
                # filter out headers we don't want to send
                continue
            if not isinstance(vals, (ListType, TupleType)):
                vals = [vals]
            for v in vals:
                log_debug(5, "Outgoing header", k, v)
                http_connection.putheader(k, v)
        http_connection.endheaders()

        # Send the body too if there is a body
        if size > 0:
            # reset file to beginning so it can be read again
            self.req.headers_in['wsgi.input'].seek(0, 0)
            if sys.version_info < (2, 6):
                data = self.req.headers_in['wsgi.input'].read(size)
            else:
                data = self.req.headers_in['wsgi.input']
            http_connection.send(data)

        # At this point everything is sent to the server
        # We now wait for the response
        try:
            response = http_connection.getresponse()
        except TimeoutException:
            log_error("Connection timed out")
            return apache.HTTP_GATEWAY_TIME_OUT, None, None
        headers = response.msg
        status = response.status
        # Get the body of the request too - well, just a fd actually
        # in this case, the response object itself.
        bodyFd = response
        return status, headers, bodyFd
Example #2
def _verifyProxyAuthToken(auth_token):
    """ verifies the validity of a proxy auth token

        NOTE: X-RHN-Proxy-Auth described in proxy/broker/rhnProxyAuth.py
    """

    log_debug(4, auth_token)
    token, hostname = splitProxyAuthToken(auth_token)
    hostname = hostname.strip()
    ipv4_regex = '^([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.([01]?\\d\\d?|2[0-4]\\d|25[0-5])$'
    # This ipv6 regex was developed by Stephen Ryan at Dataware.
    # (http://forums.intermapper.com/viewtopic.php?t=452) It is licensed
    # under a Creative Commons Attribution-ShareAlike 3.0 Unported
    # License, so we are free to use it as long as we attribute it to him.
    ipv6_regex = '^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?$'
    hostname_is_ip_address = re.match(ipv4_regex, hostname) or re.match(ipv6_regex, hostname)

    headers = rhnFlags.get('outputTransportOptions')
    if len(token) < 5:
        # Bad auth information; decline any action
        log_debug(4, "incomplete proxy authentication token: %s"
                  % auth_token)
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (
            1003, _("incomplete proxy authentication token: %s") % auth_token)
        if not hostname_is_ip_address:
            headers['X-RHN-Proxy-Auth-Origin'] = hostname
        raise rhnFault(1003)  # Invalid session key

    log_debug(5, "proxy auth token: %s,  hostname: %s"
                 % (repr(token), hostname or 'n/a'))

    proxyId, proxyUser, rhnServerTime, expireOffset, signature = token[:5]
    computed = computeSignature(CFG.SECRET_KEY, proxyId, proxyUser,
                                rhnServerTime, expireOffset)

    if computed != signature:
        log_error("Proxy signature failed: proxy id='%s', proxy user='******'" %
                  (proxyId, proxyUser))
        log_debug(4, "Sent proxy signature %s does not match ours %s." % (
            signature, computed))
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (
            1003, _("Sent proxy signature %s does not match ours %s.") % (
                signature, computed))
        if not hostname_is_ip_address:
            headers['X-RHN-Proxy-Auth-Origin'] = hostname
        raise rhnFault(1003)  # Invalid session key

    # Convert the expiration/time to floats:
    rhnServerTime = float(rhnServerTime)
    expireOffset = float(expireOffset)

    if rhnServerTime + expireOffset < time.time():
        log_debug(4, "Expired proxy authentication token")
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (1004, "Expired")
        if not hostname_is_ip_address:
            headers['X-RHN-Proxy-Auth-Origin'] = hostname
        raise rhnFault(1004)  # Expired client authentication token

    log_debug(4, "Proxy auth OK: sigs match; not an expired token")
    return 1
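
The check above boils down to recomputing a signature over the token fields and comparing timestamps. As a rough illustration of that pattern only (Spacewalk's computeSignature() is defined elsewhere and may combine the fields differently), a stand-in using hmac could look like this:

import hashlib
import hmac
import time

def _check_token_sketch(secret, proxy_id, proxy_user, server_time, expire_offset, signature):
    # stand-in for computeSignature(): sign the same fields the handler above signs
    payload = ':'.join([proxy_id, proxy_user, server_time, expire_offset])
    computed = hmac.new(secret.encode(), payload.encode(), hashlib.sha1).hexdigest()
    if computed != signature:
        return False  # the handler above raises rhnFault(1003) for this case
    # the token stays valid only while rhnServerTime + expireOffset lies in the future
    return float(server_time) + float(expire_offset) >= time.time()  # else rhnFault(1004)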
Example #3
    def _handleServerResponse(self, status):
        """ This method can be overridden by subclasses who want to handle server
            responses in their own way.  By default, we will wrap all the headers up
            and send them back to the client with an error status.  This method
            should return apache.OK if everything went according to plan.
        """
        if (status != apache.HTTP_OK) and (status != apache.HTTP_PARTIAL_CONTENT):
            # Non 200 response; have to treat it differently
            log_debug(2, "Forwarding status %s" % status)
            # Copy the incoming headers to headers_out
            headers = self.responseContext.getHeaders()
            if headers is not None:
                for k in headers.keys():
                    rhnLib.setHeaderValue(self.req.headers_out, k,
                                          self._get_header(k))
            else:
                log_error('WARNING? - no incoming headers found!')
            # And that's that
            return status

        if (status == apache.HTTP_PARTIAL_CONTENT):
            return apache.HTTP_PARTIAL_CONTENT
        else:
            # apache.HTTP_OK becomes apache.OK.
            return apache.OK
Example #4
    def reload(self, server, reload_all=0):
        log_debug(4, server, "reload_all = %d" % reload_all)

        if not self.server.load(int(server)):
            log_error("Could not find server record for reload", server)
            raise rhnFault(29, "Could not find server record in the database")
        self.cert = None
        # it is lame that we have to do this
        h = rhnSQL.prepare("""
        select label from rhnServerArch where id = :archid
        """)
        h.execute(archid=self.server["server_arch_id"])
        data = h.fetchone_dict()
        if not data:
            raise rhnException("Found server with invalid numeric "
                               "architecture reference",
                               self.server.data)
        self.archname = data['label']
        # we don't know this one anymore (well, we could look it up, but
        # why would we do that?)
        self.user = None

        # XXX: Fix me
        if reload_all:
            if not self.reload_packages_byid(self.server["id"]) == 0:
                return -1
            if not self.reload_hardware_byid(self.server["id"]) == 0:
                return -1
        return 0
Example #5
    def entitle(self, server_id, history, virt_type=None):
        """
        Entitle a server according to the entitlements we have configured.
        """
        log_debug(3, self.entitlements)

        entitle_server = rhnSQL.Procedure("rhn_entitlements.entitle_server")
        # TODO: entitle_server calls can_entitle_server, so we're doing this
        # twice for each successful call. Is it necessary for external error
        # handling or can we ditch it?
        can_entitle_server = rhnSQL.Function(
            "rhn_entitlements.can_entitle_server", rhnSQL.types.NUMBER())

        can_ent = None

        history["entitlement"] = ""

        # Do a quick check to see if both virt entitlements are present. (i.e.
        # activation keys stacked together) If so, give preference to the more
        # powerful virtualization platform and remove the regular virt
        # entitlement from the list.
        found_virt = False
        found_virt_platform = False
        for entitlement in self.entitlements:
            if entitlement[0] == VIRT_ENT_LABEL:
                found_virt = True
            elif entitlement[0] == VIRT_PLATFORM_ENT_LABEL:
                found_virt_platform = True

        for entitlement in self.entitlements:
            if virt_type is not None and entitlement[0] in \
                    (VIRT_ENT_LABEL, VIRT_PLATFORM_ENT_LABEL):
                continue

            # If both virt entitlements are present, skip the least powerful:
            if found_virt and found_virt_platform and entitlement[0] == VIRT_ENT_LABEL:
                log_debug(1, "Virtualization and Virtualization Platform " +
                          "entitlements both present.")
                log_debug(1, "Skipping Virtualization.")
                continue

            try:
                can_ent = can_entitle_server(server_id, entitlement[0])
            except rhnSQL.SQLSchemaError, e:
                can_ent = 0

            try:
                # bugzilla #160077, skip attempting to entitle if we can't
                if can_ent:
                    entitle_server(server_id, entitlement[0])
            except rhnSQL.SQLSchemaError, e:
                log_error("Token failed to entitle server", server_id,
                          self.get_names(), entitlement[0], e.errmsg)
                if e.errno == 20220:
                    # ORA-20220: (servergroup_max_members) - Server group membership
                    # cannot exceed maximum membership
                    raise rhnFault(91,
                                   _("Registration failed: RHN Software service entitlements exhausted: %s") % entitlement[0]), None, sys.exc_info()[2]
                # No idea what error may be here...
                raise rhnFault(90, e.errmsg), None, sys.exc_info()[2]
Example #6
def token_server_groups(server_id, tokens_obj):
    """ Handle server group subscriptions for the registration token """
    assert(isinstance(tokens_obj, ActivationTokens))
    h = rhnSQL.prepare(_query_token_server_groups)
    server_groups = {}
    for token in tokens_obj.tokens:
        token_id = token['token_id']
        h.execute(token_id=token_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            server_group_id = row['server_group_id']
            server_groups[server_group_id] = row

    # Now try to subscribe server to group
    ret = []
    for server_group_id, sg in server_groups.items():
        log_debug(4, "token server group", sg)

        try:
            join_server_group(server_id, server_group_id)
        except rhnSQL.SQLError, e:
            log_error("Failed to add server to group", server_id,
                      server_group_id, sg["name"])
            raise rhnFault(80, _("Failed to add server to group %s") %
                           sg["name"]), None, sys.exc_info()[2]
        else:
            ret.append("Subscribed to server group '%s'" % sg["name"])
Example #7
def _delete_rpm_group(packageIds):

    references = [
        'rhnChannelPackage',
        'rhnErrataPackage',
        'rhnErrataPackageTMP',
        'rhnPackageChangelogRec',
        'rhnPackageConflicts',
        'rhnPackageFile',
        'rhnPackageObsoletes',
        'rhnPackageProvides',
        'rhnPackageRequires',
        'rhnPackageRecommends',
        'rhnPackageSuggests',
        'rhnPackageSupplements',
        'rhnPackageEnhances',
        'rhnPackageBreaks',
        'rhnPackagePredepends',
        'rhnServerNeededCache',
    ]
    deleteStatement = "delete from %s where package_id = :package_id"
    for table in references:
        h = rhnSQL.prepare(deleteStatement % table)
        count = h.executemany(package_id=packageIds)
        log_debug(3, "Deleted from %s: %d rows" % (table, count))
    deleteStatement = "delete from rhnPackage where id = :package_id"
    h = rhnSQL.prepare(deleteStatement)
    count = h.executemany(package_id=packageIds)
    if count:
        log_debug(2, "DELETED package id %s" % str(packageIds))
    else:
        log_error("No such package id %s" % str(packageIds))
    rhnSQL.commit()
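
A hypothetical invocation of the helper above (the package ids are invented, and the database layer must be initialized first, as Example #17 does with rhnSQL.initDB()):

rhnSQL.initDB()
_delete_rpm_group([1000011234, 1000011235])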
Example #8
    def __getXmlrpcServer():
        """ get an xmlrpc server object

            WARNING: if CFG.USE_SSL is off, we are sending info
                     in the clear.
        """
        log_debug(3)

        # build the URL
        url = CFG.RHN_PARENT or ''
        url = parseUrl(url)[1].split(':')[0]
        if CFG.USE_SSL:
            url = 'https://' + url + '/XMLRPC'
        else:
            url = 'http://' + url + '/XMLRPC'
        log_debug(3, 'server url: %s' % url)

        if CFG.HTTP_PROXY:
            serverObj = rpclib.Server(url,
                                      proxy=CFG.HTTP_PROXY,
                                      username=CFG.HTTP_PROXY_USERNAME,
                                      password=CFG.HTTP_PROXY_PASSWORD)
        else:
            serverObj = rpclib.Server(url)
        if CFG.USE_SSL and CFG.CA_CHAIN:
            if not os.access(CFG.CA_CHAIN, os.R_OK):
                log_error('ERROR: missing or cannot access (for ca_chain): %s' % CFG.CA_CHAIN)
                raise rhnFault(1000,
                               _("Spacewalk Proxy error (file access issues). "
                                 "Please contact your system administrator. "
                                 "Please refer to Spacewalk Proxy logs."))
            serverObj.add_trusted_cert(CFG.CA_CHAIN)
        serverObj.add_header('X-RHN-Client-Version', 2)
        return serverObj
Example #9
    def autoentitle(self):
        entitlement_hierarchy = ['enterprise_entitled']

        any_base_entitlements = 0

        for entitlement in entitlement_hierarchy:
            try:
                self._entitle(entitlement)
                any_base_entitlements = 1
            except rhnSQL.SQLSchemaError:
                e = sys.exc_info()[1]
                if e.errno == 20287:
                    # ORA-20287: (invalid_entitlement) - The server can not be
                    # entitled to the specified level
                    #
                    # ignore for now, since any_base_entitlements will throw
                    # an error at the end if not set
                    continue

                # Should not normally happen
                log_error("Failed to entitle", self.server["id"], entitlement,
                          e.errmsg)
                raise_with_tb(server_lib.rhnSystemEntitlementException("Unable to entitle"), sys.exc_info()[2])
            except rhnSQL.SQLError:
                e = sys.exc_info()[1]
                log_error("Failed to entitle", self.server["id"], entitlement,
                          str(e))
                raise_with_tb(server_lib.rhnSystemEntitlementException("Unable to entitle"), sys.exc_info()[2])
            else:
                if any_base_entitlements:
                    # All is fine
                    return
                else:
                    raise_with_tb(server_lib.rhnNoSystemEntitlementsException, sys.exc_info()[2])
Example #10
    def process(self):
        log_debug(3)
        # nice thing that req has a read() method, so it makes it look just
        # like an fd
        try:
            fd = self.input.decode(self.req)
        except IOError: # client timed out
            return apache.HTTP_BAD_REQUEST

        # Read the data from the request
        _body = fd.read()
        fd.close()

        # In this case, we talk to a client (maybe through a proxy)
        # make sure we have something to decode
        if _body is None or len(_body) == 0:
            return apache.HTTP_BAD_REQUEST

        # Decode the request; avoid logging crappy responses
        try:
            params, method = self.decode(_body)
        except xmlrpclib.ResponseError:
            log_error("Got bad XML-RPC blob of len = %d" % len(_body))
            return apache.HTTP_BAD_REQUEST
        else:
            if params is None:
                params = ()
        # make the actual function call and return the result
        return self.call_function(method, params)
Example #11
def exitWithTraceback(e, msg, exitnum, mail=0):
    tbOut = StringIO()
    Traceback(mail, ostream=tbOut, with_locals=1)
    log_error(-1, _('ERROR: %s %s: %s') %
              (e.__class__.__name__, msg, e))
    log_error(-1, _('TRACEBACK: %s') % tbOut.getvalue())
    sys.exit(exitnum)
Example #12
    def _execute_wrapper(self, function, *p, **kw):
        params = ",".join(["%s: %s" % (repr(key), repr(value)) for key, value in kw.items()])
        log_debug(5, 'Executing SQL: "%s" with bind params: {%s}' % (self.sql, params))
        if self.sql is None:
            raise rhnException("Cannot execute empty cursor")
        if self.blob_map:
            blob_content = {}
            for orig_blob_var in self.blob_map.keys():
                new_blob_var = orig_blob_var + "_blob"
                blob_content[new_blob_var] = kw[orig_blob_var]
                kw[new_blob_var] = self.var(cx_Oracle.BLOB)
                del kw[orig_blob_var]
        modified_params = self._munge_args(kw)

        try:
            retval = apply(function, p, kw)
        except self.OracleError, e:
            ret = self._get_oracle_error_info(e)
            if isinstance(ret, types.StringType):
                raise sql_base.SQLError(self.sql, p, kw, ret), None, sys.exc_info()[2]
            (errno, errmsg) = ret[:2]
            if 900 <= errno <= 999:
                # Per Oracle's documentation, SQL parsing error
                raise sql_base.SQLStatementPrepareError(errno, errmsg, self.sql), None, sys.exc_info()[2]
            if errno == 1475:  # statement needs to be reparsed; force a prepare again
                if self.reparsed:  # useless, tried that already. give up
                    log_error("Reparsing cursor did not fix it", self.sql)
                    args = ("Reparsing tried and still got this",) + tuple(ret)
                    raise sql_base.SQLError(*args), None, sys.exc_info()[2]
                self._real_cursor = self.dbh.prepare(self.sql)
                self.reparsed = 1
                apply(self._execute_wrapper, (function,) + p, kw)
            elif 20000 <= errno <= 20999:  # error codes we know we raise as schema errors
                raise sql_base.SQLSchemaError(*ret), None, sys.exc_info()[2]
            raise apply(sql_base.SQLError, ret), None, sys.exc_info()[2]
Example #13
 def connect(self, reconnect=1):
     log_debug(1, "Connecting to database", self.dbtxt)
     self._fix_environment_vars()
     try:
         self.dbh = self._connect()
     except self.OracleError, e:
         ret = self._get_oracle_error_info(e)
         if isinstance(ret, types.StringType):
             raise sql_base.SQLConnectError(self.dbtxt, -1,
                                            "Unable to connect to database", ret), None, sys.exc_info()[2]
         (errno, errmsg) = ret[:2]
         log_error("Connection attempt failed", errno, errmsg)
         if reconnect:
             # we don't try to reconnect blindly.  We have a list of
             # known "good" failure codes that warrant a reconnect
             # attempt
             if errno in [12547]:  # lost contact
                 return self.connect(reconnect=0)
             err_args = [self.dbtxt, errno, errmsg]
             err_args.extend(list(ret[2:]))
             raise sql_base.SQLConnectError(*err_args), None, sys.exc_info()[2]
         # else, this is a reconnect attempt
         raise sql_base.SQLConnectError(*(
             [self.dbtxt, errno, errmsg,
              "Attempting Re-Connect to the database failed", ] + ret[2:])), None, sys.exc_info()[2]
Example #14
    def _transformKsRequestForBroker(self, req):

        # Get the checksum for the requested resource from the satellite.

        (status, checksum) = self._querySatelliteForChecksum(req)
        if status != apache.OK or not checksum:
            return status

        # If we got this far, we have the checksum.  Create a new URI based on
        # the checksum.

        newURI = self._generateCacheableKickstartURI(req.uri, checksum)
        if not newURI:
            # Couldn't create a cacheable URI, log an error and revert to
            # BZ 158236 behavior.

            log_error('Could not create cacheable ks URI from "%s"' % req.uri)
            return apache.OK

        # Now we must embed the old URI into a header in the original request
        # so that the SSL Redirect has it available if the resource has not
        # been cached yet.  We will also embed a header that holds the new URI,
        # so that the content handler can use it later.

        log_debug(3, "Generated new kickstart URI: %s" % newURI)
        req.headers_in[HEADER_ACTUAL_URI] = req.uri
        req.headers_in[HEADER_EFFECTIVE_URI] = newURI

        return apache.OK
Example #15
def check_password(username, password, service):
    global __username, __password
    auth = PAM.pam()
    auth.start(service, username, __pam_conv)

    # Save the username and passwords in the globals, the conversation
    # function needs access to them
    __username = username
    __password = password

    try:
        try:
            auth.authenticate()
            auth.acct_mgmt()
        finally:
            # Something to be always executed - cleanup
            __username = __password = None
    except PAM.error:
        e = sys.exc_info()[1]
        resp, code = e.args[:2]
        log_error("Password check failed (%s): %s" % (code, resp))
        return 0
    except:
        raise_with_tb(rhnException('Internal PAM error'), sys.exc_info()[2])
    else:
        # Good password
        return 1
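
A hypothetical call of check_password() (the user, password and PAM service name are made up; the service must be configured under /etc/pam.d on the host):

if check_password('jdoe', 'secret', 'rhn-client'):
    print("authenticated via PAM")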
Example #16
 def headerParserHandler(self, req):
     log_setreq(req)
     # init configuration options with proper component
     options = req.get_options()
     # if we are initializing out of a <Location> handler don't
     # freak out
     if not options.has_key("RHNComponentType"):
         # clearly nothing to do
         return apache.OK
     initCFG(options["RHNComponentType"])
     initLOG(CFG.LOG_FILE, CFG.DEBUG)
     if req.method == 'GET':
         # This is the ping method
         return apache.OK
     self.servers = rhnImport.load("upload_server/handlers",
         interface_signature='upload_class')
     if not options.has_key('SERVER'):
         log_error("SERVER not set in the apache config files!")
         return apache.HTTP_INTERNAL_SERVER_ERROR
     server_name = options['SERVER']
     if not self.servers.has_key(server_name):
         log_error("Unable to load server %s from available servers %s" %
             (server_name, self.servers))
         return apache.HTTP_INTERNAL_SERVER_ERROR
     server_class = self.servers[server_name]
     self.server = server_class(req)
     return self._wrapper(req, "headerParserHandler")
Example #17
def isAllowedSlave(hostname):
    rhnSQL.initDB()
    if not rhnSQL.fetchone_dict("select 1 from rhnISSSlave where slave = :hostname and enabled = 'Y'",
                                hostname=idn_puny_to_unicode(hostname)):
        log_error('Server "%s" is not enabled for ISS.' % hostname)
        return False
    return True
Example #18
    def guest_registered(self, host_sid, guest_sid):
        host_system_slots = server_lib.check_entitlement(host_sid)
        host_system_slots = list(host_system_slots.keys())

        try:
            host_system_slots.remove("virtualization_host")
        except ValueError:
            pass

        guest_system_slots = server_lib.check_entitlement(guest_sid)
        guest_system_slots = list(guest_system_slots.keys())

        for entitlement in host_system_slots:
            if entitlement not in guest_system_slots:
                try:
                    rhnSQL.transaction(entitlement)
                    procedure.rhn_entitlements.entitle_server(guest_sid,
                                                              entitlement)
                except rhnSQL.SQLError:
                    e = sys.exc_info()[1]
                    rhnSQL.rollback(entitlement)
                    log_error("Error adding entitlement %s to host ID-%s: %s"
                              % (entitlement, guest_sid, str(e)))
                    # rhnSQL.rollback()
                    return
Example #19
 def notify(self, indexName="server"):
     try:
         client = xmlrpclib.ServerProxy(self.addr)
         result = client.admin.updateIndex(indexName)
     except Exception, e:
         log_error("Failed to notify search service located at %s to update %s indexes" % (self.addr, indexName), e)
         return False
Example #20
 def guest_migrated(self, old_host_sid, new_host_sid, guest_sid, guest_uuid):
     try:
         procedure.rhn_entitlements.repoll_virt_guest_entitlements(new_host_sid)
     except rhnSQL.SQLError, e:
         log_error("Error adding entitlement: %s" % str(e))
         # rhnSQL.rollback()
         return
Example #21
def solve_dependencies(server_id, deps, version, nvre=None):
    """ The unchanged version of solve_dependencies.
        IN:
           server_id := id info of the server
           deps := list of filenames that are needed by the caller
           version := version of the client

        OUT:
           Dictionary with key values being the filenames in deps and the values being a list of lists of package info.
           Example :=  {'filename1'    :   [['name', 'version', 'release', 'epoch'],
                                            ['name2', 'version2', 'release2', 'epoch2']]}
    """
    if not nvre:
        # list of the keys to the values in each row of the recordset.
        nvre = ['name', 'version', 'release', 'epoch']

    # first, uniquify deps
    deplist = set(deps)

    # SQL statement.  It is a union of 3 statements:
    #  - Lookup by package name
    #  - Lookup by provides
    #  - Lookup by file name

    statement = "%s UNION ALL %s UNION ALL %s" % (
        __packages_sql, __provides_sql, __files_sql)
    h = rhnSQL.prepare(statement)

    # prepare return value
    packages = {}
    # Iterate through the dependency problems
    for dep in deplist:
        dict = {}
        h.execute(server_id=server_id, dep=dep)
        rs = h.fetchall_dict() or []
        if not rs:  # test shortcut
            log_error("Unable to solve dependency", server_id, dep)
            packages[dep] = []
            continue

        for p in rs:
            if p['epoch'] is None:
                p['epoch'] = ""
            entry = []
            list(map(lambda f, e=entry, p=p: e.append(p[f]), nvre))

            name_key = entry[0]
            if name_key in dict and dict[name_key][1] < p['preference']:
                # Already have it with a lower preference
                continue
            # The first time we see this package.
            dict[name_key] = (entry, p['preference'])

        packages[dep] = _avoid_compat_packages(dict)

    # v2 clients are done
    if version > 1:
        return packages
    else:
        return _v2packages_to_v1list(packages, deplist)
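
A hypothetical call matching the docstring above (the server id and file names are invented; passing version > 1 keeps the v2 dictionary shape):

result = solve_dependencies(1000010000, ['libcrypto.so.10', 'perl(Carp)'], version=2)
# result maps each requested name to a list of [name, version, release, epoch] entries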
Example #22
def delete_guests(server_id):
    """
    Callback used after a successful kickstart to remove any guest virtual
    instances, as well as their associated servers.
    """
    # First delete all the guest server objects:
    h = rhnSQL.prepare(_query_lookup_guests_for_host)
    h.execute(server_id=server_id)
    delete_server = rhnSQL.Procedure("delete_server")
    log_debug(4, "Deleting guests")
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        guest_id = row['virtual_system_id']
        log_debug(4, 'Deleting guest server: %s' % guest_id)
        try:
            if guest_id is not None:
                delete_server(guest_id)
        except rhnSQL.SQLError:
            log_error("Error deleting server: %s" % guest_id)

    # Finally delete all the virtual instances:
    log_debug(4, "Deleting all virtual instances for host")
    h = rhnSQL.prepare(_query_delete_virtual_instances)
    h.execute(server_id=server_id)

    # Commit all changes:
    try:
        rhnSQL.commit()
    except rhnSQL.SQLError:
        e = sys.exc_info()[1]
        log_error("Error committing transaction: %s" % e)
        rhnSQL.rollback()
Example #23
 def repl_func(self, match_object):
     try:
         return self._repl_func(match_object)
     except ValueError:
         e = sys.exc_info()[1]
         log_error("cfg variable interpolation error", e)
         return match_object.group()
Example #24
 def _wrapper(self, req, function_name):
     #log_debug(1, "_wrapper", req, function_name)
     if not hasattr(self.server, function_name):
         log_error("%s doesn't have a %s function" %
                   (self.server, function_name))
         return apache.HTTP_NOT_FOUND
     function = getattr(self.server, function_name)
     try:
         log_debug(5, "Calling", function)
         ret = function(req)
     except rhnFault:
         e = sys.exc_info()[1]
         log_debug(4, "rhnFault caught: %s" % (e, ))
         error_string = self._exception_to_text(e)
         error_code = e.code
         self._error_to_headers(req.err_headers_out, error_code, error_string)
         ret = rhnFlags.get("apache-return-code")
         if not ret:
             ret = apache.HTTP_INTERNAL_SERVER_ERROR
         req.status = ret
         log_debug(4, "_wrapper %s exited with apache code %s" %
                   (function_name, ret))
     except rhnSession.ExpiredSessionError:
         e = sys.exc_info()[1]
         # if session expires we catch here and return a forbidden
         # and make it re-authenticate
         log_debug(4, "Expire Session Error Caught: %s" % (e, ))
         return 403
     except:
         Traceback("server.apacheUploadServer._wrapper", req=req)
         log_error("Unhandled exception")
         return apache.HTTP_INTERNAL_SERVER_ERROR
     return ret
Example #25
 def auth_system(self, system_id):
     """ System authentication. We override the standard function because
         we need to check additionally if this system_id is entitled for
         proxy functionality.
     """
     log_debug(3)
     server = rhnHandler.auth_system(self, system_id)
     # if it did not blow up, we have a valid server. Check proxy
     # entitlement.
     # XXX: this needs to be moved out of the rhnServer module,
     # possibly in here
     h = rhnSQL.prepare("""
     select 1
     from rhnProxyInfo pi
     where pi.server_id = :server_id
     """)
     h.execute(server_id = self.server_id)
     row = h.fetchone_dict()
     if not row:
         # we require entitlement for this functionality
         log_error("Server not entitled for Proxy", self.server_id)
         raise rhnFault(1002, _(
             'Spacewalk Proxy service not enabled for server profile: "%s"')
                        % server.server["name"])
     # we're fine...
     return server
Example #26
    def getPackageErratum(self, system_id, pkg):
        """ Clients v2+ - Get errata for a package given [n,v,r,e,a,...] format

            Sing-along: You say erratum(sing), I say errata(pl)! :)
            IN:  pkg:   [n,v,r,e,s,a,ch,...]
            RET: a hash by errata that applies to this package
        """
        log_debug(5, system_id, pkg)
        if type(pkg) != type([]) or len(pkg) < 7:
            log_error("Got invalid package specification: %s" % str(pkg))
            raise rhnFault(30, _("Expected a package, not: %s") % pkg)
        # Authenticate and decode server id.
        self.auth_system(system_id)
        # log the entry
        log_debug(1, self.server_id, pkg)
        # Stuff the action in the headers:
        transport = rhnFlags.get("outputTransportOptions")
        transport["X-RHN-Action"] = "getPackageErratum"

        name, ver, rel, epoch, arch, size, channel = pkg[:7]
        if epoch in ["", "none", "None"]:
            epoch = None

        # XXX: also, should arch/size/channel ever be used?
        # bug#186996:adding synopsis field to errata info
        # client side changes are needed to access this data.
        h = rhnSQL.prepare(
            """
        select distinct
            e.id            errata_id,
            e.advisory_type errata_type,
            e.advisory      advisory,
            e.topic         topic,
            e.description   description,
            e.synopsis      synopsis
        from
            rhnServerChannel sc,
            rhnChannelPackage cp,
            rhnChannelErrata ce,
            rhnErrata e,
            rhnErrataPackage ep,
            rhnPackage p
        where
            p.name_id = LOOKUP_PACKAGE_NAME(:name)
        and p.evr_id = LOOKUP_EVR(:epoch, :ver, :rel)
        -- map to a channel
        and p.id = cp.package_id
        -- map to an errata as well
        and p.id = ep.package_id
        and ep.errata_id = e.id
        -- the errata and the channel have to be linked
        and e.id = ce.errata_id
        and ce.channel_id = cp.channel_id
        -- and the server has to be subscribed to the channel
        and cp.channel_id = sc.channel_id
        and sc.server_id = :server_id
        """
        )  # " emacs sucks
        h.execute(name=name, ver=ver, rel=rel, epoch=epoch, server_id=str(self.server_id))
        return self._sanitize_result(h)
Example #27
    def entitle(self, server_id, history, virt_type=None):
        """
        Entitle a server according to the entitlements we have configured.
        """
        log_debug(3, self.entitlements)

        entitle_server = rhnSQL.Procedure("rhn_entitlements.entitle_server")
        # TODO: entitle_server calls can_entitle_server, so we're doing this
        # twice for each successful call. Is it necessary for external error
        # handling or can we ditch it?
        can_entitle_server = rhnSQL.Function(
            "rhn_entitlements.can_entitle_server", rhnSQL.types.NUMBER())

        can_ent = None

        history["entitlement"] = ""

        for entitlement in self.entitlements:
            if virt_type is not None and entitlement[0] == VIRT_ENT_LABEL:
                continue

            try:
                can_ent = can_entitle_server(server_id, entitlement[0])
            except rhnSQL.SQLSchemaError, e:
                can_ent = 0

            try:
                # bugzilla #160077, skip attempting to entitle if we can't
                if can_ent:
                    entitle_server(server_id, entitlement[0])
            except rhnSQL.SQLSchemaError, e:
                log_error("Token failed to entitle server", server_id,
                          self.get_names(), entitlement[0], e.errmsg)
                #No idea what error may be here...
                raise rhnFault(90, e.errmsg), None, sys.exc_info()[2]
Example #28
    def setup_config(self, config, force=0):
        # Figure out the log level
        debug_level = self.options.verbose
        if debug_level is None:
            debug_level = CFG.debug
        self.debug_level = debug_level
        logfile = self.options.logfile
        if logfile is None or logfile == '':
            logfile = CFG.log_file
        initLOG(level=debug_level, log_file=logfile)

        # Get the ssl cert
        ssl_cert = CFG.osa_ssl_cert
        try:
            self.check_cert(ssl_cert)
        except jabber_lib.InvalidCertError:
            e = sys.exc_info()[1]
            log_error("Invalid SSL certificate:", e)
            return 1

        self.ssl_cert = ssl_cert

        rhnSQL.initDB()

        self._username = '******'
        self._password = self.get_dispatcher_password(self._username)
        if not self._password:
            self._password = self.create_dispatcher_password(32)
        self._resource = 'superclient'
        js = config.get('jabber_server')
        self._jabber_servers = [ idn_ascii_to_puny(js) ]
Example #29
    def getPackagePath(self, pkgFilename, redirect=0):
        """ OVERLOADS getPackagePath in common/rhnRepository.
            Returns complete path to an RPM file.
        """

        log_debug(3, pkgFilename)
        mappingName = "package_mapping:%s:" % self.channelName
        pickledMapping = self._cacheObj(mappingName, self.channelVersion,
                                        self.__channelPackageMapping, ())

        mapping = cPickle.loads(pickledMapping)

        # If the file name has parameters, it's a different kind of package.
        # Determine the architecture requested so we can construct an
        # appropriate filename.
        if type(pkgFilename) == types.ListType:
            arch = pkgFilename[3]
            if isSolarisArch(arch):
                pkgFilename = "%s-%s-%s.%s.pkg" % \
                    (pkgFilename[0],
                     pkgFilename[1],
                     pkgFilename[2],
                     pkgFilename[3])

        if not mapping.has_key(pkgFilename):
            log_error("Package not in mapping: %s" % pkgFilename)
            raise rhnFault(17, _("Invalid RPM package requested: %s")
                                 % pkgFilename)
        filePath = "%s/%s" % (CFG.PKG_DIR, mapping[pkgFilename])
        log_debug(4, "File path", filePath)
        if not os.access(filePath, os.R_OK):
            log_debug(4, "Package not found locally: %s" % pkgFilename)
            raise NotLocalError(filePath, pkgFilename)
        return filePath
Example #30
 def connect(self, reconnect=1):
     log_debug(1, "Connecting to database", self.dbtxt)
     self._fix_environment_vars()
     try:
         self.dbh = self._connect()
     except self.OracleError:
         e = sys.exc_info()[1]
         ret = self._get_oracle_error_info(e)
         if isinstance(ret, usix.StringType):
             raise_with_tb(sql_base.SQLConnectError(self.dbtxt, -1,
                                            "Unable to connect to database", ret), sys.exc_info()[2])
         (errno, errmsg) = ret[:2]
         log_error("Connection attempt failed", errno, errmsg)
         if reconnect:
             # we don't try to reconnect blindly.  We have a list of
             # known "good" failure codes that warrant a reconnect
             # attempt
             if errno in [12547]:  # lost contact
                 return self.connect(reconnect=0)
             err_args = [self.dbtxt, errno, errmsg]
             err_args.extend(list(ret[2:]))
             raise_with_tb(sql_base.SQLConnectError(*err_args), sys.exc_info()[2])
         # else, this is a reconnect attempt
         raise sql_base.SQLConnectError(*(
             [self.dbtxt, errno, errmsg,
              "Attempting Re-Connect to the database failed", ] + ret[2:])).with_traceback(sys.exc_info()[2])
     dbh_id = id(self.dbh)
     # Reset the statement cache for this database connection
     self._cursor_class._cursor_cache[dbh_id] = {}
Example #31
class Queue(rhnHandler):
    """ XMLRPC queue functions that we will provide for the outside world. """
    def __init__(self):
        """ Add a list of functions we are willing to server out. """
        rhnHandler.__init__(self)
        self.functions.append('get')
        self.functions.append('get_future_actions')
        self.functions.append('length')
        self.functions.append('submit')

        # XXX I am not proud of this. There should be a generic way to map
        # the client's error codes into success status codes
        self.action_type_completed_codes = {
            'errata.update': {
                39: None,
            },
        }

    def __getV1(self, action):
        """ Fetches old queued actions for the client version 1. """
        log_debug(3, self.server_id)
        actionId = action['id']
        method = action["method"]
        if method == 'packages.update':
            xml = self.__packageUpdate(actionId)
        elif method == 'errata.update':
            xml = self.__errataUpdate(actionId)
        elif method == 'hardware.refresh_list':
            xml = xmlrpclib.dumps(("hardware", ), methodname="client.refresh")
        elif method == 'packages.refresh_list':
            xml = xmlrpclib.dumps(("rpmlist", ), methodname="client.refresh")
        else:  # Unrecognized, skip
            raise InvalidAction("Action method %s unsupported by "
                                "Update Agent Client" % method)
        # all good
        return {'id': actionId, 'version': 1, 'action': xml}

    def __getV2(self, action, dry_run=0):
        """ Fetches queued actions for the clients version 2+. """
        log_debug(3, self.server_id)
        # Get the root dir of this install
        try:
            method = getMethod.getMethod(action['method'], 'server.action')
        except getMethod.GetMethodException:
            Traceback("queue.get V2")
            raise EmptyAction("Could not get a valid method for %s" %
                              (action['method'], )), None, sys.exc_info()[2]
        # Call the method
        result = method(self.server_id, action['id'], dry_run)
        if result is None:
            # None are mapped to the empty list
            result = ()
        elif not isinstance(result, TupleType):
            # Everything other than a tuple is wrapped in a tuple
            result = (result, )

        xmlblob = xmlrpclib.dumps(result, methodname=action['method'])
        log_debug(5, "returning xmlblob for action", xmlblob)
        return {
            'id': action['id'],
            'action': xmlblob,
            'version': action['version'],
        }

    def __update_status(self, status):
        """ Update the runnng kernel and the last boot values for this
            server from the status dictionary passed on queue checkin.

            Record last running kernel and uptime.  Only update
            last_boot if it has changed by more than five seconds. We
            don't know the timezone the server is in, or even if its
            clock is right, but we do know it can properly track seconds
            since it rebooted, and use our own clocks to keep proper
            track of the actual time.
        """

        rhnSQL.set_log_auth_login('CLIENT')
        if status.has_key('uname'):
            kernelver = status['uname'][2]
            if kernelver != self.server.server["running_kernel"]:
                self.server.server["running_kernel"] = kernelver

        # XXX: We should be using Oracle's sysdate() for this management
        # In the case of multiple app servers in multiple time zones all the
        # results are skewed.
        if status.has_key('uptime'):
            uptime = status['uptime']
            if isinstance(uptime, type([])) and len(uptime):
                # Toss the other values. For now
                uptime = uptime[0]
                try:
                    uptime = float(uptime)
                except ValueError:
                    # Wrong value passed by the client
                    pass
                else:
                    last_boot = time.time() - uptime
                    if abs(last_boot - self.server.server["last_boot"]) > 5:
                        self.server.server["last_boot"] = last_boot
                        self.__set_reboot_action_to_succcess()

        # this is smart enough to do a NOOP if nothing changed.
        self.server.server.save()

    def __set_reboot_action_to_succcess(self):
        h = rhnSQL.prepare("""
            update rhnServerAction
            set status = 2
            where server_id = :server_id
            and action_id in (
                    select sa.action_id
                    from rhnServerAction sa
                    join rhnAction a on sa.action_id = a.id
                    join rhnActionType at on a.action_type = at.id
                   where sa.server_id = :server_id
                     and sa.status = 1
                     and at.label = 'reboot.reboot'
            )
        """)
        h.execute(server_id=self.server_id)

    def __should_snapshot(self):
        log_debug(4, self.server_id, "determining whether to snapshot...")

        entitlements = self.server.check_entitlement()
        if not entitlements.has_key("enterprise_entitled"):
            return 0

        # ok, take the snapshot before attempting this action
        return 1

    def _invalidate_child_actions(self, action_id):
        f_action_ids = rhnAction.invalidate_action(self.server_id, action_id)
        for f_action_id in f_action_ids:
            # Invalidate any kickstart session that depends on this action
            server_kickstart.update_kickstart_session(self.server_id,
                                                      f_action_id,
                                                      action_status=3,
                                                      kickstart_state='failed',
                                                      next_action_type=None)
        return f_action_ids

    def _invalidate_failed_prereq_actions(self):
        h = rhnSQL.prepare("""
            select sa.action_id, a.prerequisite
              from rhnServerAction sa, rhnAction a
             where sa.server_id = :server_id
               and sa.action_id = a.id
               and sa.status in (0, 1) -- Queued or picked up
               and a.prerequisite is not null
               and exists (
                   select 1
                     from rhnServerAction
                    where server_id = sa.server_id
                      and action_id = a.prerequisite
                      and status = 3 -- failed
               )
        """)

        h.execute(server_id=self.server_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break

            action_id, prereq_action_id = row['action_id'], row['prerequisite']

            self._invalidate_child_actions(action_id)

    _query_future_enabled = rhnSQL.Statement("""
        select staging_content_enabled
          from rhnOrgConfiguration oc,
               rhnServer s
         where s.org_id = oc.org_id
           and s.id = :server_id
    """)

    def _future_actions_enabled(self):
        """ Returns true if staging content is enabled for this system """
        h = rhnSQL.prepare(self._query_future_enabled)
        h.execute(server_id=self.server_id)
        row = h.fetchone_dict()
        log_debug(4, row["staging_content_enabled"])
        return row["staging_content_enabled"] == "Y"

    _query_queue_future = rhnSQL.Statement("""
                    select sa.action_id id, a.version,
                           sa.remaining_tries, at.label method,
                           at.unlocked_only,
                           a.prerequisite
                      from rhnServerAction sa,
                           rhnAction a,
                           rhnActionType at
                     where sa.server_id = :server_id
                       and sa.action_id = a.id
                       and a.action_type = at.id
                       and sa.status in (0, 1) -- Queued or picked up
                       and a.earliest_action <= current_timestamp + numtodsinterval(:time_window * 3600, 'second')  -- Check earliest_action
                       and at.label in ('packages.update', 'errata.update',
                            'packages.runTransaction', 'packages.fullUpdate')
                      order by a.earliest_action, a.prerequisite nulls first, a.id
    """)

    def get_future_actions(self, system_id, time_window):
        """ return actions which are scheduled within next /time_window/ hours """
        self.auth_system(system_id)
        log_debug(3,
                  "Checking for future actions within %d hours" % time_window)
        result = []
        if self._future_actions_enabled() and not self.__reboot_in_progress():
            h = rhnSQL.prepare(self._query_queue_future)
            h.execute(server_id=self.server_id, time_window=time_window)
            action = h.fetchone_dict()
            while action:
                log_debug(5, action)
                result.append(self.__getV2(action, dry_run=1))
                action = h.fetchone_dict()
        return result

    _query_queue_get = rhnSQL.Statement("""
                    select sa.action_id id, a.version,
                           sa.remaining_tries, at.label method,
                           at.unlocked_only,
                           a.prerequisite
                      from rhnServerAction sa,
                           rhnAction a,
                           rhnActionType at
                     where sa.server_id = :server_id
                       and sa.action_id = a.id
                       and a.action_type = at.id
                       and sa.status in (0, 1) -- Queued or picked up
                       and a.earliest_action <= current_timestamp -- Check earliest_action
                       and not exists (
                           select 1
                             from rhnServerAction sap
                            where sap.server_id = :server_id
                              and sap.action_id = a.prerequisite
                              and sap.status != 2 -- completed
                           )
                      order by a.earliest_action, a.prerequisite nulls first, a.id
    """)

    # Probably we need to figure out if we really need to split these two.
    def get(self, system_id, version=1, status={}):
        # Authenticate the system certificate
        if CFG.DISABLE_CHECKINS:
            self.update_checkin = 0
        else:
            self.update_checkin = 1
        self.auth_system(system_id)
        log_debug(1, self.server_id, version,
                  "checkins %s" % ["disabled", "enabled"][self.update_checkin])
        if status:
            self.__update_status(status)

        # Update the capabilities list
        rhnCapability.update_client_capabilities(self.server_id)

        # Invalidate failed actions
        self._invalidate_failed_prereq_actions()

        server_locked = self.server.server_locked()
        log_debug(3, "Server locked", server_locked)

        if self.__reboot_in_progress():
            log_debug(3, "Server reboot in progress", self.server_id)
            rhnSQL.commit()
            return ""

        ret = {}
        # get the action. Status codes are currently:
        # 0 Queued # 1 Picked Up # 2 Completed # 3 Failed
        # XXX: we should really be using labels from rhnActionType instead of
        #      hard coded type id numbers.
        # We fetch actions whose prerequisites have completed, and actions
        # that don't have prerequisites at all
        h = rhnSQL.prepare(self._query_queue_get)

        should_execute = 1

        # Loop to get a valid action
        # (only one valid action will be dealt with per execution of this function...)
        while 1:
            if should_execute:
                h.execute(server_id=self.server_id)
                should_execute = 0

            # Okay, got an action
            action = h.fetchone_dict()
            if not action:  # No actions available; bail out
                # Don't forget the commit at the end...
                ret = ""
                break
            action_id = action['id']
            log_debug(4, "Checking action %s" % action_id)
            # okay, now we have the action - process it.
            if action['remaining_tries'] < 1:
                log_debug(4, "Action %s picked up too many times" % action_id)
                # We've run out of pickup attempts for this action...
                self.__update_action(
                    action_id,
                    status=3,
                    message="This action has been picked up multiple times "
                    "without a successful transaction; "
                    "this action is now failed for this system.")
                # Invalidate actions that depend on this one
                self._invalidate_child_actions(action_id)
                # keep looking for a good action to process...
                continue

            if server_locked and action['unlocked_only'] == 'Y':
                # This action is locked
                log_debug(
                    4, "server id %s locked for action id %s" %
                    (self.server_id, action_id))
                continue

            try:
                if version == 1:
                    ret = self.__getV1(action)
                else:
                    ret = self.__getV2(action)
            except ShadowAction, e:  # Action the client should not see
                # Make sure we re-execute the query, so we pick up whatever
                # extra actions were added
                should_execute = 1
                text = e.args[0]
                log_debug(4, "Shadow Action", text)
                self.__update_action(action['id'], 2, 0, text)
                continue
            except InvalidAction, e:  # This is an invalid action
                # Update its status so it won't bother us again
                text = e.args[0]
                log_debug(4, "Invalid Action", text)
                self.__update_action(action['id'], 3, -99, text)
                continue
            except EmptyAction, e:
                # this means that we have some sort of internal error
                # which gets reported in the logs. We don't touch the
                # action because this should get fixed on our side.
                log_error("Can not process action data", action, e.args)
                ret = ""
                break
Example #32
def send(url, sendData=None):
    """Connect to url and return the result as stringIO

    :arg url: the url where the request will be sent
    :kwarg sendData: do a post-request when "sendData" is given.

    Returns the result as stringIO object.

    """
    connect_retries = 10
    try_counter = connect_retries
    timeout = 120
    if CFG.is_initialized() and CFG.has_key('TIMEOUT'):
        timeout = CFG.TIMEOUT
    curl = pycurl.Curl()

    curl.setopt(pycurl.CONNECTTIMEOUT, timeout)
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
    curl.setopt(pycurl.VERBOSE, True)
    proxy_url, proxy_user, proxy_pass = get_proxy(url)
    if proxy_url:
        curl.setopt(pycurl.PROXY, proxy_url)
    log_debug(2, "Connect to %s" % url)
    if sendData is not None:
        curl.setopt(pycurl.POSTFIELDS, sendData)
        if (CFG.is_initialized() and CFG.has_key('DISABLE_EXPECT')
                and CFG.DISABLE_EXPECT):
            # disable Expect header
            curl.setopt(pycurl.HTTPHEADER, ['Expect:'])

    # We implement our own redirection-following, because pycurl
    # 7.19 doesn't POST after it gets redirected. Ideally we'd be
    # using pycurl.POSTREDIR here, but that's in 7.21.
    curl.setopt(pycurl.FOLLOWLOCATION, False)

    response = StringIO()
    curl.setopt(pycurl.WRITEFUNCTION, response.write)

    try_counter = connect_retries
    while try_counter:
        try_counter -= 1
        try:
            curl.perform()
        except pycurl.error as e:
            if e.args[0] == 56:  # Proxy requires authentication
                log_debug(2, e.args[1])
                if not (proxy_user and proxy_pass):
                    raise TransferException("Proxy requires authentication, "
                                            "but reading credentials from "
                                            "%s failed." % YAST_PROXY)
                curl.setopt(pycurl.PROXYUSERPWD,
                            "%s:%s" % (proxy_user, proxy_pass))
            elif e.args[0] == 60:
                log_error("Peer certificate could not be authenticated "
                          "with known CA certificates.")
                raise TransferException("Peer certificate could not be "
                                        "authenticated with known CA "
                                        "certificates.")
            else:
                log_error(e.args[1])
                raise

        status = curl.getinfo(pycurl.HTTP_CODE)
        if status == 200 or (URL(url).scheme == "file" and status == 0):
            # OK or file
            break
        elif status in (301, 302):  # redirects
            url = curl.getinfo(pycurl.REDIRECT_URL)
            log_debug(2, "Got redirect to %s" % url)
            curl.setopt(pycurl.URL, url)
    else:
        log_error("Connecting to %s has failed after %s "
                  "tries with HTTP error code %s." %
                  (URL(url).getURL(stripPw=True), connect_retries, status))
        raise TransferException("Connection failed after %s tries with "
                                "HTTP error %s." % (connect_retries, status))

    # StringIO.write leaves the cursor at the end of the file
    response.seek(0)
    return response
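A minimal, self-contained sketch of the same manual retry-and-follow-redirects pattern, assuming only pycurl; EXAMPLE_URL and fetch_following_redirects are hypothetical names, not part of the code above.

import pycurl
from io import BytesIO

EXAMPLE_URL = "http://example.com/"   # hypothetical target

def fetch_following_redirects(url, retries=10):
    curl = pycurl.Curl()
    buf = BytesIO()
    curl.setopt(pycurl.WRITEFUNCTION, buf.write)
    curl.setopt(pycurl.FOLLOWLOCATION, False)   # follow redirects by hand
    status = None
    while retries:
        retries -= 1
        buf.seek(0)
        buf.truncate(0)                         # drop any redirect body
        curl.setopt(pycurl.URL, url)
        curl.perform()
        status = curl.getinfo(pycurl.HTTP_CODE)
        if status == 200:
            buf.seek(0)
            return buf
        if status in (301, 302):
            url = curl.getinfo(pycurl.REDIRECT_URL)
        else:
            break
    raise RuntimeError("giving up on %s, last HTTP status %s" % (url, status))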
Example #33
0
    def dump_channel_packages_short(self,
                                    channel_label,
                                    last_modified,
                                    filepath=None,
                                    validate_channels=False,
                                    send_headers=False,
                                    open_stream=True):
        log_debug(2, channel_label)
        if validate_channels:
            channels = self._validate_channels(channel_labels=[channel_label])
            channel_obj = channels[channel_label]
        else:
            channels = channel_label
            channel_obj = channels
        db_last_modified = int(rhnLib.timestamp(channel_obj['last_modified']))
        last_modified = int(rhnLib.timestamp(last_modified))
        log_debug(3, "last modified", last_modified, "db last modified",
                  db_last_modified)
        if last_modified != db_last_modified:
            raise rhnFault(3013, "The requested channel version does not match"
                           " the upstream version",
                           explain=0)
        channel_id = channel_obj['channel_id']
        if filepath:
            key = filepath
        else:
            key = "xml-channel-packages/rhn-channel-%d.data" % channel_id
        # Try to get everything off of the cache
        val = rhnCache.get(key, compressed=0, raw=1, modified=last_modified)
        if val is None:
            # Not generated yet
            log_debug(4,
                      "Cache MISS for %s (%s)" % (channel_label, channel_id))
            stream = self._cache_channel_packages_short(
                channel_id, key, last_modified)
        else:
            log_debug(4, "Cache HIT for %s (%s)" % (channel_label, channel_id))
            temp_stream = tempfile.TemporaryFile()
            temp_stream.write(bytes(val, encoding="latin-1"))
            temp_stream.flush()
            stream = self._normalize_compressed_stream(temp_stream)

        # Copy the results to the output stream
        # They should already be compressed if compression was requested
        buffer_size = 16384
        # Send the HTTP headers - but don't init the compressed stream since
        # we send the data ourselves
        if send_headers:
            self._send_headers(init_compressed_stream=0)
        if open_stream:
            self._raw_stream = open(key, "w")
        while 1:
            buff = stream.read(buffer_size)
            if not buff:
                break
            try:
                self._raw_stream.write(buff)
            except IOError:
                log_error("Client disconnected prematurely")
                self.close()
                raise_with_tb(ClosedConnectionError, sys.exc_info()[2])
        # We're done
        if open_stream:
            self._raw_stream.close()
        return 0
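The copy loop at the end can be expressed as a small, reusable helper; a minimal sketch assuming ordinary file-like objects (copy_stream is a hypothetical name, not part of the code above).

def copy_stream(src, dst, buffer_size=16384):
    """Copy src to dst in fixed-size chunks, as in the loop above."""
    while True:
        chunk = src.read(buffer_size)
        if not chunk:
            break
        dst.write(chunk)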
Example #34
0
    def create_update_suse_products(self, sysid, guid, secret, ostarget,
                                    products):
        log_debug(4, sysid, guid, ostarget, products)

        # search whether a suseServer with this guid exists which is not this server;
        # that would indicate a re-registration, and we need to remove the old rhnServer
        h = rhnSQL.prepare("""
    SELECT
           rhn_server_id as id
      FROM suseServer
     WHERE guid = :guid
       AND rhn_server_id != :sysid
    """)
        h.execute(sysid=sysid, guid=guid)
        d = h.fetchone_dict()
        if d:
            old_sysid = d['id']
            log_debug(1, "Found duplicate server:", old_sysid)
            delete_server = rhnSQL.Procedure("delete_server")
            try:
                if old_sysid is not None:
                    delete_server(old_sysid)
            except rhnSQL.SQLError:
                log_error("Error deleting server: %s" % old_sysid)
            # If we delete the rhnServer, all references are deleted too
            #
            # now switch suseServer to new id
            #h = rhnSQL.prepare("""
            #  UPDATE suseServer
            #     SET rhn_server_id = :sysid
            #  WHERE rhn_server_id = :oldsysid
            #""")
            #h.execute(sysid=sysid, oldsysid=old_sysid);

        # remove this guid from suseDelServer list
        h = rhnSQL.prepare("""
      DELETE FROM suseDelServer
      WHERE guid = :guid
    """)
        h.execute(guid=guid)
        #rhnSQL.commit()

        # search if suseServer with ID sysid exists
        h = rhnSQL.prepare("""
      SELECT
        s.rhn_server_id as id,
        s.guid,
        s.secret,
        sot.target as ostarget,
        s.ncc_sync_required
      FROM suseServer s
      LEFT JOIN suseOSTarget sot ON s.ostarget_id = sot.id
      WHERE rhn_server_id = :sysid
    """)
        h.execute(sysid=sysid)
        t = h.fetchone_dict()
        ncc_sync_required = False

        # if not; create new suseServer
        if not t:
            ncc_sync_required = True
            h = rhnSQL.prepare("""
        INSERT INTO suseServer
          (rhn_server_id, guid, secret, ostarget_id)
          values (:sysid, :guid, :secret, 
          (select id from suseOSTarget
           where os = :ostarget))
      """)
            h.execute(sysid=sysid, guid=guid, secret=secret, ostarget=ostarget)
        else:
            # if yes, read values and compare them with the provided data
            # update if needed
            data = {
                'rhn_server_id': sysid,
                'guid': guid,
                'secret': secret,
                'ostarget': ostarget
            }

            if t['guid'] != guid or t['secret'] != secret or t[
                    'ostarget'] != ostarget:
                ncc_sync_required = True
                h = rhnSQL.prepare("""
          UPDATE suseServer
             SET guid = :guid,
                 secret = :secret,
                 ostarget_id = (select id from suseOSTarget where os = :ostarget)
           WHERE rhn_server_id = :rhn_server_id
        """)
                h.execute(*(), **data)
        # check products
        h = rhnSQL.prepare("""
      SELECT
          suse_installed_product_id as id
        FROM suseServerInstalledProduct
       WHERE rhn_server_id = :sysid
    """)
        h.execute(sysid=sysid)
        existing_products = [x['id'] for x in h.fetchall_dict() or []]

        for product in products:
            sipid = self.get_installed_product_id(product)
            if not sipid:
                continue
            if sipid in existing_products:
                existing_products.remove(sipid)
                continue
            h = rhnSQL.prepare("""
        INSERT INTO suseServerInstalledProduct
        (rhn_server_id, suse_installed_product_id)
        VALUES(:sysid, :sipid)
      """)
            h.execute(sysid=sysid, sipid=sipid)
            ncc_sync_required = True

        for pid in existing_products:
            h = rhnSQL.prepare("""
        DELETE from suseServerInstalledProduct
         WHERE rhn_server_id = :sysid
           AND suse_installed_product_id = :pid
      """)
            h.execute(sysid=sysid, pid=pid)
            ncc_sync_required = True

        if ncc_sync_required:
            # If the data have changed, we set the
            # sync_required flag and reset the errors
            # flag to give the registration another try
            h = rhnSQL.prepare("""
        UPDATE suseServer
           SET ncc_sync_required = 'Y',
               ncc_reg_error = 'N'
        WHERE rhn_server_id = :sysid
      """)
            h.execute(sysid=sysid)
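The product reconciliation above boils down to a set difference; a minimal sketch with plain Python sets (diff_products is a hypothetical helper, not part of the code above).

def diff_products(reported_ids, stored_ids):
    """Return (ids to insert, ids to delete, whether an NCC sync is needed)."""
    reported, stored = set(reported_ids), set(stored_ids)
    to_insert = reported - stored     # reported by the client but not yet stored
    to_delete = stored - reported     # stored but no longer reported
    return to_insert, to_delete, bool(to_insert or to_delete)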
Example #35
0
    def add_suse_products(self, suse_products):
        log_debug(1, suse_products)
        if not isinstance(suse_products, dict):
            log_error("argument type is not a hash: %s" % suse_products)
            raise TypeError("This function requires a hash as an argument")
        self.suse_products = suse_products
Example #36
0
            error_code = e.code
            self._error_to_headers(req.err_headers_out, error_code, error_string)
            ret = rhnFlags.get("apache-return-code")
            if not ret:
                ret = apache.HTTP_INTERNAL_SERVER_ERROR
            req.status = ret
            log_debug(4, "_wrapper %s exited with apache code %s" %
                (function_name, ret))
        except rhnSession.ExpiredSessionError, e:
            # if the session expires we catch it here and return a forbidden
            # and make the client re-authenticate
            log_debug(4, "Expire Session Error Caught: %s" % (e, ))
            return 403
        except:
            Traceback("server.apacheUploadServer._wrapper", req=req)
            log_error("Unhandled exception")
            return apache.HTTP_INTERNAL_SERVER_ERROR
        return ret

    # Adds an error code and error string to the headers passed in
    def _error_to_headers(self, headers, error_code, error_string):
        error_string = string.strip(error_string)
        import base64
        error_string = string.strip(base64.encodestring(error_string))
        for line in map(string.strip, string.split(error_string, '\n')):
            headers.add(self.server.error_header_prefix + '-String', line)
        headers[self.server.error_header_prefix + '-Code'] = str(error_code)

    def _exception_to_text(self, exception):
        return """\
Error Message:
Example #37
0
    def submit(self, system_id, action_id, result, message="", data={}):
        """ Submit the results of a queue run.
            Maps old and new rhn_check behavior to new database status codes

            The new API uses 4 slightly different status codes than the
            old client does.  This function will "hopefully" sensibly
            map them.  Old methodology:
               -rhn_check retrieves an action from the top of the action queue.
               -It attempts to execute the desired action and returns either
                   (a) 0   -- presumed successful.
                   (b) rhnFault object -- presumed failed
                   (c) some other non-fault object -- *assumed* successful.
               -Regardless of result code, action is marked as "executed"

            We try to make a smarter status selection (i.e. failed||completed).

            For reference:
            New DB status codes:      Old DB status codes:
                  0: Queued               0: queued
                  1: Picked Up            1: picked up
                  2: Completed            2: executed
                  3: Failed               3: completed
        """
        if type(action_id) is not IntType:
            # Convert it to int
            try:
                action_id = int(action_id)
            except ValueError:
                log_error("Invalid action_id", action_id)
                raise rhnFault(
                    30,
                    _("Invalid action value type %s (%s)") %
                    (action_id, type(action_id))), None, sys.exc_info()[2]
        # Authenticate the system certificate
        self.auth_system(system_id)
        log_debug(1, self.server_id, action_id, result)
        # check that the action is valid
        # We have a uniqueness constraint on (action_id, server_id)
        h = rhnSQL.prepare("""
            select at.label action_type,
                   at.trigger_snapshot,
                   at.name
              from rhnServerAction sa,
                   rhnAction a,
                   rhnActionType at
             where sa.server_id = :server_id
               and sa.action_id = :action_id
               and sa.status = 1
               and a.id = :action_id
               and a.action_type = at.id
        """)
        h.execute(server_id=self.server_id, action_id=action_id)
        row = h.fetchone_dict()
        if not row:
            log_error("Server %s does not own action %s" %
                      (self.server_id, action_id))
            raise rhnFault(
                22,
                _("Action %s does not belong to server %s") %
                (action_id, self.server_id))

        action_type = row['action_type']
        trigger_snapshot = (row['trigger_snapshot'] == 'Y')

        if data.has_key('missing_packages'):
            missing_packages = "Missing-Packages: %s" % str(
                data['missing_packages'])
            rmsg = "%s %s" % (message, missing_packages)
        elif data.has_key('koan'):
            rmsg = "%s: %s" % (message, data['koan'])
        else:
            rmsg = message

        rcode = result
        # Careful with this one, result can be a very complex thing
        # and this processing is required for compatibility with old
        # rhn_check clients
        if type(rcode) == type({}):
            if result.has_key("faultCode"):
                rcode = result["faultCode"]
            if result.has_key("faultString"):
                rmsg = result["faultString"] + str(data)
        if type(rcode) in [type({}), type(()), type([])] \
                or type(rcode) is not IntType:
            rmsg = u"%s [%s]" % (unicode(message), unicode(rcode))
            rcode = -1
        # map to db codes.
        status = self.status_for_action_type_code(action_type, rcode)

        if status == 3:
            # Failed action - invalidate children
            self._invalidate_child_actions(action_id)
        elif action_type == 'reboot.reboot':
            # reboot action should stay as pickup
            rhnSQL.commit()
            return 0
        elif status == 2 and trigger_snapshot and self.__should_snapshot():
            # if action status is 'Completed', snapshot if allowed and if needed
            self.server.take_snapshot("Scheduled action completion:  %s" %
                                      row['name'])

        self.__update_action(action_id, status, rcode, rmsg)

        # Store the status in a flag - easier than to complicate the action
        # plugin API by adding a status
        rhnFlags.set('action_id', action_id)
        rhnFlags.set('action_status', status)

        self.process_extra_data(self.server_id,
                                action_id,
                                data=data,
                                action_type=action_type)

        # commit, because nobody else will
        rhnSQL.commit()
        return 0
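A hedged sketch of the result-normalization step described in the docstring: dict results may carry faultCode/faultString, and anything that is not a plain integer becomes the failure code -1 (normalize_result is a hypothetical helper; the final mapping to a DB status is still done by status_for_action_type_code).

def normalize_result(result, message, data):
    rcode, rmsg = result, message
    if isinstance(result, dict):
        rcode = result.get("faultCode", rcode)
        if "faultString" in result:
            rmsg = result["faultString"] + str(data)
    if not isinstance(rcode, int) or isinstance(rcode, bool):
        # non-integer results (dicts, lists, strings, booleans) count as failures
        rmsg = "%s [%s]" % (message, rcode)
        rcode = -1
    return rcode, rmsg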
Example #38
0
def _verifyProxyAuthToken(auth_token):
    """ verifies the validity of a proxy auth token

        NOTE: X-RHN-Proxy-Auth described in proxy/broker/rhnProxyAuth.py
    """

    log_debug(4, auth_token)
    token, hostname = splitProxyAuthToken(auth_token)
    hostname = hostname.strip()
    ipv4_regex = '^([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.([01]?\\d\\d?|2[0-4]\\d|25[0-5])$'
    # This ipv6 regex was developed by Stephen Ryan at Dataware.
    # (http://forums.intermapper.com/viewtopic.php?t=452) It is licensed
    # under a Creative Commons Attribution-ShareAlike 3.0 Unported
    # License, so we are free to use it as long as we attribute it to him.
    ipv6_regex = '^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?$'
    hostname_is_ip_address = re.match(ipv4_regex, hostname) or re.match(
        ipv6_regex, hostname)

    headers = rhnFlags.get('outputTransportOptions')
    if len(token) < 5:
        # Bad auth information; decline any action
        log_debug(4, "incomplete proxy authentication token: %s" % auth_token)
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (
            1003, _("incomplete proxy authentication token: %s") % auth_token)
        if not hostname_is_ip_address:
            headers['X-RHN-Proxy-Auth-Origin'] = hostname
        raise rhnFault(1003)  # Invalid session key

    log_debug(
        5, "proxy auth token: %s,  hostname: %s" %
        (repr(token), hostname or 'n/a'))

    proxyId, proxyUser, rhnServerTime, expireOffset, signature = token[:5]
    computed = computeSignature(CFG.SECRET_KEY, proxyId, proxyUser,
                                rhnServerTime, expireOffset)

    if computed != signature:
        log_error("Proxy signature failed: proxy id='%s', proxy user='******'" %
                  (proxyId, proxyUser))
        log_debug(
            4, "Sent proxy signature %s does not match ours %s." %
            (signature, computed))
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (
            1003, _("Sent proxy signature %s does not match ours %s.") %
            (signature, computed))
        if not hostname_is_ip_address:
            headers['X-RHN-Proxy-Auth-Origin'] = hostname
        raise rhnFault(1003)  # Invalid session key

    # Convert the expiration/time to floats:
    rhnServerTime = float(rhnServerTime)
    expireOffset = float(expireOffset)

    if rhnServerTime + expireOffset < time.time():
        log_debug(4, "Expired proxy authentication token")
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (1004, "Expired")
        if not hostname_is_ip_address:
            headers['X-RHN-Proxy-Auth-Origin'] = hostname
        raise rhnFault(1004)  # Expired client authentication token

    log_debug(4, "Proxy auth OK: sigs match; not an expired token")
    return 1
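A minimal sketch of the two checks above (signature match, then expiry), with a stand-in compute_signature based on hashlib; the real computeSignature in the Spacewalk code base may use a different algorithm.

import hashlib
import time

def compute_signature(secret, proxy_id, proxy_user, issued, expire_offset):
    # stand-in only; the real algorithm lives in computeSignature()
    payload = ":".join(str(x) for x in (secret, proxy_id, proxy_user,
                                        issued, expire_offset))
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()

def token_is_valid(secret, proxy_id, proxy_user, issued, expire_offset, signature):
    computed = compute_signature(secret, proxy_id, proxy_user,
                                 issued, expire_offset)
    if computed != signature:
        return False                                              # signature mismatch
    return float(issued) + float(expire_offset) >= time.time()   # not expired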
Example #39
0
        except redirectException, re:
            log_debug(3,"redirect exception caught",re.path)
            response = re.path

        except rhnFault, f:
            response = f.getxml()
        except rhnSQL.SQLSchemaError, e:
            f = None
            if e.errno == 20200:
                log_debug(2, "User Group Membership EXCEEDED")
                f = rhnFault(43, e.errmsg)
            elif e.errno == 20220:
                log_debug(2, "Server Group Membership EXCEEDED")
                f = rhnFault(44, e.errmsg)
            if not f:
                log_error("rhnSQL.SQLSchemaError caught", e)
                rhnSQL.rollback()
                # generate the traceback report
                Traceback(method, self.req,
                          extra = "SQL Error generated: %s" % e,
                          severity="schema")
                return apache.HTTP_INTERNAL_SERVER_ERROR
            response = f.getxml()
        except rhnSQL.SQLError, e:
            log_error("rhnSQL.SQLError caught", e)
            rhnSQL.rollback()
            Traceback(method, self.req,
                      extra="SQL Error generated: %s" % e,
                      severity="schema")
            return apache.HTTP_INTERNAL_SERVER_ERROR
        except Exception, e:
Example #40
0
    def call_function(self, method, params):
        # short-circuit everything if sending a system-wide message.
        if CFG.SEND_MESSAGE_TO_ALL:
            # Make sure the applet doesn't see the message
            if method == 'applet.poll_status':
                return self.response({
                    'checkin_interval': 3600,
                    'server_status': 'normal'
                })
            if method == 'applet.poll_packages':
                return self.response({'use_cached_copy': 1})

            # Fetch global message being sent to clients if applicable.
            msg = open(CFG.MESSAGE_TO_ALL).read()
            log_debug(3, "Sending message to all clients: %s" % msg)
            # Send the message as a fault.
            response = xmlrpclib.Fault(
                -1, _("IMPORTANT MESSAGE FOLLOWS:\n%s") % msg)
            # and now send everything back
            ret = self.response(response)
            log_debug(4, "Leave with return value", ret)
            return ret

        # req: where the response is sent to
        log_debug(2, method)

        # Now we have the reference, call away
        force_rollback = 1
        try:
            rhnSQL.clear_log_id()
            # now get the function reference and call it
            func = self.method_ref(method)
            response = func(*params)
        except (TypeError, ValueError, KeyError, IndexError, UnknownXML):
            # report exception back to server
            fault = 1

            if sys.version_info[0] == 3:
                exctype = sys.exc_info()[0]
            else:
                exctype = sys.exc_type

            if exctype == UnknownXML:
                fault = -1
            e_type, e_value = sys.exc_info()[:2]
            response = xmlrpclib.Fault(fault, _(
                "While running '%s': caught\n%s : %s\n") % (
                method, e_type, e_value))
            Traceback(method, self.req,
                      extra="Response sent back to the caller:\n%s\n" % (
                          response.faultString,),
                      severity="notification")
        except rhnNotFound:
            e = sys.exc_info()[1]
            return apache.HTTP_NOT_FOUND
        # pkilambi:catch exception if redirect
        except redirectException:
            re = sys.exc_info()[1]
            log_debug(3, "redirect exception caught", re.path)
            response = re.path

        except rhnFault:
            f = sys.exc_info()[1]
            response = f.getxml()
        except rhnSQL.SQLSchemaError:
            e = sys.exc_info()[1]
            f = None
            if e.errno == 20200:
                log_debug(2, "User Group Membership EXCEEDED")
                f = rhnFault(43, e.errmsg)
            if not f:
                log_error("rhnSQL.SQLSchemaError caught", e)
                rhnSQL.rollback()
                # generate the traceback report
                Traceback(method, self.req,
                          extra="SQL Error generated: %s" % e,
                          severity="schema")
                return apache.HTTP_INTERNAL_SERVER_ERROR
            response = f.getxml()
        except rhnSQL.SQLError:
            e = sys.exc_info()[1]
            log_error("rhnSQL.SQLError caught", e)
            rhnSQL.rollback()
            Traceback(method, self.req,
                      extra="SQL Error generated: %s" % e,
                      severity="schema")
            return apache.HTTP_INTERNAL_SERVER_ERROR
        except Exception:
            e = sys.exc_info()[1]
            log_error("Unhandled exception", e)
            rhnSQL.rollback()
            # otherwise we do a full stop
            Traceback(method, self.req, severity="unhandled")
            return apache.HTTP_INTERNAL_SERVER_ERROR
        else:
            # if no exception, we don't need to rollback
            force_rollback = 0
        if force_rollback:
            rhnSQL.rollback()
        rhnSQL.clear_log_id()
        # and now send everything back
        ret = self.response(response)
        log_debug(4, "Leave with return value", ret)
        return ret
Example #41
0
    def entitle(self, server_id, history, virt_type=None):
        """
        Entitle a server according to the entitlements we have configured.
        """
        log_debug(3, self.entitlements)

        entitle_server = rhnSQL.Procedure("rhn_entitlements.entitle_server")
        # TODO: entitle_server calls can_entitle_server, so we're doing this
        # twice for each successful call. Is it necessary for external error
        # handling or can we ditch it?
        can_entitle_server = rhnSQL.Function(
            "rhn_entitlements.can_entitle_server", rhnSQL.types.NUMBER())

        can_ent = None

        history["entitlement"] = ""

        # Do a quick check to see if both virt entitlements are present. (i.e.
        # activation keys stacked together) If so, give preference to the more
        # powerful virtualization platform and remove the regular virt
        # entitlement from the list.
        found_virt = False
        found_virt_platform = False
        for entitlement in self.entitlements:
            if entitlement[0] == VIRT_ENT_LABEL:
                found_virt = True
            elif entitlement[0] == VIRT_PLATFORM_ENT_LABEL:
                found_virt_platform = True

        for entitlement in self.entitlements:
            if virt_type is not None and entitlement[0] in \
                    (VIRT_ENT_LABEL, VIRT_PLATFORM_ENT_LABEL):
                continue

            # If both virt entitlements are present, skip the least powerful:
            if found_virt and found_virt_platform and entitlement[
                    0] == VIRT_ENT_LABEL:
                log_debug(
                    1, "Virtualization and Virtualization Platform " +
                    "entitlements both present.")
                log_debug(1, "Skipping Virtualization.")
                continue

            try:
                can_ent = can_entitle_server(server_id, entitlement[0])
            except rhnSQL.SQLSchemaError, e:
                can_ent = 0

            try:
                # bugzilla #160077, skip attempting to entitle if we cant
                if can_ent:
                    entitle_server(server_id, entitlement[0])
            except rhnSQL.SQLSchemaError, e:
                log_error("Token failed to entitle server", server_id,
                          self.get_names(), entitlement[0], e.errmsg)
                if e.errno == 20220:
                    # ORA-20220: (servergroup_max_members) - Server group membership
                    # cannot exceed maximum membership
                    raise rhnFault(
                        91,
                        _("Registration failed: RHN Software service entitlements exhausted: %s"
                          ) % entitlement[0]), None, sys.exc_info()[2]
                # No idea what error may be here...
                raise rhnFault(90, e.errmsg), None, sys.exc_info()[2]
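The "prefer the stronger virtualization entitlement" rule can be isolated into a tiny helper; a sketch where the label strings are hypothetical stand-ins for VIRT_ENT_LABEL and VIRT_PLATFORM_ENT_LABEL.

def effective_entitlements(labels,
                           virt="virtualization_host",
                           virt_platform="virtualization_host_platform"):
    """Drop the plain virt entitlement when the platform one is also present."""
    labels = list(labels)
    if virt in labels and virt_platform in labels:
        labels.remove(virt)
    return labels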
Example #42
0
    def handler(self):
        """ Main handler to handle all requests pumped through this server. """

        # pylint: disable=R0915
        log_debug(1)
        self._prepHandler()

        _oto = rhnFlags.get('outputTransportOptions')

        # tell parent that we can follow redirects, even if client is not able to
        _oto['X-RHN-Transport-Capability'] = "follow-redirects=3"

        # No reason to put Host: in the header, the connection object will
        # do that for us

        # Add/modify the X-RHN-IP-Path header.
        ip_path = None
        if 'X-RHN-IP-Path' in _oto:
            ip_path = _oto['X-RHN-IP-Path']
        log_debug(4, "X-RHN-IP-Path is: %s" % repr(ip_path))
        client_ip = self.req.connection.remote_ip
        if ip_path is None:
            ip_path = client_ip
        else:
            ip_path += ',' + client_ip
        _oto['X-RHN-IP-Path'] = ip_path

        # NOTE: X-RHN-Proxy-Auth described in broker/rhnProxyAuth.py
        if 'X-RHN-Proxy-Auth' in _oto:
            log_debug(5, 'X-RHN-Proxy-Auth currently set to: %s' % repr(_oto['X-RHN-Proxy-Auth']))
        else:
            log_debug(5, 'X-RHN-Proxy-Auth is not set')

        if self.req.headers_in.has_key('X-RHN-Proxy-Auth'):
            tokens = []
            if 'X-RHN-Proxy-Auth' in _oto:
                tokens = _oto['X-RHN-Proxy-Auth'].split(',')
            log_debug(5, 'Tokens: %s' % tokens)

        # GETs: authenticate user, and service local GETs.
        getResult = self.__local_GET_handler(self.req)
        if getResult is not None:
            # it's a GET request
            return getResult

        # 1. check cached version of the proxy login,
        #    snag token if there...
        #    if not... login...
        #    if good token, cache it.
        # 2. push into headers.
        authToken = self.proxyAuth.check_cached_token()
        log_debug(5, 'Auth token for this machine only! %s' % authToken)
        tokens = []

        _oto = rhnFlags.get('outputTransportOptions')
        if _oto.has_key('X-RHN-Proxy-Auth'):
            log_debug(5, '    (auth token prior): %s' % repr(_oto['X-RHN-Proxy-Auth']))
            tokens = _oto['X-RHN-Proxy-Auth'].split(',')

        # list of tokens to be pushed into the headers.
        tokens.append(authToken)
        tokens = [t for t in tokens if t]

        _oto['X-RHN-Proxy-Auth'] = ','.join(tokens)
        log_debug(5, '    (auth token after): %s'
                  % repr(_oto['X-RHN-Proxy-Auth']))

        log_debug(3, 'Trying to connect to parent')

        # Loops twice? Here's why:
        #   o If no errors, the loop is broken and we move on.
        #   o If an error, either we get a new token and try again,
        #     or we get a critical error and we fault.
        for _i in range(2):
            self._connectToParent()  # part 1

            log_debug(4, 'after _connectToParent')
            # Add the proxy version
            rhnFlags.get('outputTransportOptions')['X-RHN-Proxy-Version'] = str(_PROXY_VERSION)

            status = self._serverCommo()       # part 2

            # check for proxy authentication blowup.
            respHeaders = self.responseContext.getHeaders()
            if not respHeaders or \
               not respHeaders.has_key('X-RHN-Proxy-Auth-Error'):
                # No proxy auth errors
                # XXX: need to verify that with respHeaders ==
            #      None that this is correct logic. It should be -taw
                break

            error = str(respHeaders['X-RHN-Proxy-Auth-Error']).split(':')[0]

            # If a proxy other than this one needs to update its auth token
            # pass the error on up to it
            if (respHeaders.has_key('X-RHN-Proxy-Auth-Origin') and
                    respHeaders['X-RHN-Proxy-Auth-Origin'] != self.proxyAuth.hostname):
                break

            # Expired/invalid auth token; go through the loop once again
            if error == '1003':  # invalid token
                msg = "Spacewalk Proxy Session Token INVALID -- bad!"
                log_error(msg)
                log_debug(0, msg)
            elif error == '1004':
                log_debug(1,
                          "Spacewalk Proxy Session Token expired, acquiring new one.")
            else:  # this should never happen.
                msg = "Spacewalk Proxy login failed, error code is %s" % error
                log_error(msg)
                log_debug(0, msg)
                raise rhnFault(1000,
                               _("Spacewalk Proxy error (issues with proxy login). "
                                 "Please contact your system administrator."))

            # Forced refresh of the proxy token
            rhnFlags.get('outputTransportOptions')['X-RHN-Proxy-Auth'] = self.proxyAuth.check_cached_token(1)
        else:  # for
            # The token could not be acquired
            log_debug(0, "Unable to acquire proxy authentication token")
            raise rhnFault(1000,
                           _("Spacewalk Proxy error (unable to acquire proxy auth token). "
                             "Please contact your system administrator."))

        # Support for yum byte-range
        if (status != apache.OK) and (status != apache.HTTP_PARTIAL_CONTENT):
            log_debug(1, "Leaving handler with status code %s" % status)
            return status

        self.__handleAction(self.responseContext.getHeaders())

        return self._clientCommo()
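The two-pass loop above relies on Python's for/else idiom: break on success, refresh the token and retry otherwise, and let the else branch handle exhaustion. A minimal sketch with hypothetical callables (do_request, refresh_token).

def call_with_token_refresh(do_request, refresh_token, attempts=2):
    for _ in range(attempts):
        if do_request():           # succeeded, or the error belongs to another proxy
            break
        refresh_token()            # force a new auth token and try again
    else:
        raise RuntimeError("could not acquire a valid proxy auth token")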
Example #43
0
class ActivationTokens:
    """
    An aggregation of activation tokens, exposing important information
    like org_id, user_id etc in a unified manner.
    """
    is_rereg_token = 0
    forget_rereg_token = 0

    def __init__(self,
                 tokens,
                 user_id=None,
                 org_id=None,
                 kickstart_session_id=None,
                 entitlements=[],
                 deploy_configs=None):
        self.tokens = tokens
        self.user_id = user_id
        self.org_id = org_id
        self.kickstart_session_id = kickstart_session_id
        #        self.entitlement_label = entitlement_label
        #        self.entitlement_name = entitlement_name
        # Boolean
        self.deploy_configs = deploy_configs
        # entitlements is list of tuples [(name, label)]
        self.entitlements = entitlements

    def __nonzero__(self):
        return (len(self.tokens) > 0)

    def get_server_id(self):
        if not self:
            return None
        # We can have only one re-activation key
        for token in self.tokens:
            server_id = token.get('server_id')
            if server_id:
                return server_id
        # We hit this when no re-activation key
        return None

    def get_user_id(self):
        return self.user_id

    def get_org_id(self):
        return self.org_id

    def get_kickstart_session_id(self):
        return self.kickstart_session_id

    def get_entitlements(self):
        return self.entitlements

    def has_entitlement_label(self, entitlement):
        if entitlement in map(lambda x: x[0], self.entitlements):
            return 1
        return 0

    def get_deploy_configs(self):
        return self.deploy_configs

    def get_names(self):
        """ Returns a string of the entitlement names that the token grants.
            This function is poorly named.
        """
        token_names = map(lambda x: x[0], self.entitlements)
        if not token_names:
            return None
        return ",".join(token_names)

    def get_tokens(self):
        tokens = []
        for token in self.tokens:
            tokens.append(token['token'])

        return tokens

    def entitle(self, server_id, history, virt_type=None):
        """
        Entitle a server according to the entitlements we have configured.
        """
        log_debug(3, self.entitlements)

        entitle_server = rhnSQL.Procedure("rhn_entitlements.entitle_server")
        # TODO: entitle_server calls can_entitle_server, so we're doing this
        # twice for each successful call. Is it necessary for external error
        # handling or can we ditch it?
        can_entitle_server = rhnSQL.Function(
            "rhn_entitlements.can_entitle_server", rhnSQL.types.NUMBER())

        can_ent = None

        history["entitlement"] = ""

        # Do a quick check to see if both virt entitlements are present. (i.e.
        # activation keys stacked together) If so, give preference to the more
        # powerful virtualization platform and remove the regular virt
        # entitlement from the list.
        found_virt = False
        found_virt_platform = False
        for entitlement in self.entitlements:
            if entitlement[0] == VIRT_ENT_LABEL:
                found_virt = True
            elif entitlement[0] == VIRT_PLATFORM_ENT_LABEL:
                found_virt_platform = True

        for entitlement in self.entitlements:
            if virt_type is not None and entitlement[0] in \
                    (VIRT_ENT_LABEL, VIRT_PLATFORM_ENT_LABEL):
                continue

            # If both virt entitlements are present, skip the least powerful:
            if found_virt and found_virt_platform and entitlement[
                    0] == VIRT_ENT_LABEL:
                log_debug(
                    1, "Virtualization and Virtualization Platform " +
                    "entitlements both present.")
                log_debug(1, "Skipping Virtualization.")
                continue

            try:
                can_ent = can_entitle_server(server_id, entitlement[0])
            except rhnSQL.SQLSchemaError, e:
                can_ent = 0

            try:
                # bugzilla #160077, skip attempting to entitle if we cant
                if can_ent:
                    entitle_server(server_id, entitlement[0])
            except rhnSQL.SQLSchemaError, e:
                log_error("Token failed to entitle server", server_id,
                          self.get_names(), entitlement[0], e.errmsg)
                if e.errno == 20220:
                    # ORA-20220: (servergroup_max_members) - Server group membership
                    # cannot exceed maximum membership
                    raise rhnFault(
                        91,
                        _("Registration failed: RHN Software service entitlements exhausted: %s"
                          ) % entitlement[0]), None, sys.exc_info()[2]
                # No idea what error may be here...
                raise rhnFault(90, e.errmsg), None, sys.exc_info()[2]
            except rhnSQL.SQLError, e:
                log_error("Token failed to entitle server", server_id,
                          self.get_names(), entitlement[0], e.args)
                raise rhnFault(90, str(e)), None, sys.exc_info()[2]
Example #44
0
def fetch_token(token_string):
    """ Fetches a token from the database """
    log_debug(3, token_string)
    # A token should always be passed to this function
    assert token_string
    tokens = token_string.split(',')
    h = rhnSQL.prepare(_query_token)
    result = []
    rereg_token_found = 0
    num_of_rereg = 0
    # Global user_id and org_id
    user_id = None
    same_user_id = 1
    org_id = None
    ks_session_id_token = None
    deploy_configs = None
    entitlements_base = {}
    entitlements_extra = {}

    # List of re-registration entitlements labels (if found):
    rereg_ents = []

    for token in tokens:
        h.execute(token=token)
        token_entry, token_entitlements = _fetch_token_from_cursor(h)

        if not token_entry:
            # Unable to find the token
            log_error("Invalid token '%s'" % token)
            raise rhnFault(60,
                           _("Could not find token '%s'") % token,
                           explain=0)

        row = token_entry

        if row.get('server_id'):
            rereg_token_found = row
            num_of_rereg += 1

            # Store the re-reg ents:
            for tup in token_entitlements.keys():
                rereg_ents.append(tup[0])

        # Check user_id
        token_user_id = row.get('user_id')

        # 4/27/05 wregglej - Commented this line out 'cause the token_user_id should
        # be allowed to be None. This line was causing problems when registering with
        # an activation key whose creator had been deleted.
        #assert(token_user_id is not None)

        if same_user_id and user_id is not None and user_id != token_user_id:
            log_debug(4,
                      "Different user ids: %s, %s" % (same_user_id, user_id))
            # This token has a different user id than the rest
            same_user_id = 0
        else:
            user_id = token_user_id

        # Check org_id
        token_org_id = row.get('org_id')
        assert (token_org_id is not None)
        if org_id is not None and org_id != token_org_id:
            # Cannot use activation keys from different orgs
            raise rhnFault(63, _("Tokens from mismatching orgs"), explain=0)
        org_id = token_org_id

        # Check kickstart session ids
        token_ks_session_id = row.get('kickstart_session_id')
        if token_ks_session_id is not None:
            if ks_session_id_token is not None:
                ks_session_id = ks_session_id_token['kickstart_session_id']
                if ks_session_id != token_ks_session_id:
                    # Two tokens with different kickstart sessions
                    raise rhnFault(63,
                                   _("Kickstart session mismatch"),
                                   explain=0)
            else:
                # This token has kickstart session id info
                ks_session_id_token = row

        # Iterate through the entitlements from this token
        # and instead of picking one entitlement, create a union of
        # all the entitlements as a list of tuples of (name, label) aka
        # (token_type, token_desc)
        _categorize_token_entitlements(token_entitlements, entitlements_base,
                                       entitlements_extra)

        # Deploy configs?
        deploy_configs = deploy_configs or (row['deploy_configs'] == 'Y')
        result.append(row)

    # One should not stack re-activation tokens
    if num_of_rereg > 1:
        raise rhnFault(
            63,
            _("Stacking of re-registration tokens is not supported"),
            explain=0)

    entitlements_remove = []
    _validate_entitlements(token_string, rereg_ents, entitlements_base,
                           entitlements_extra, entitlements_remove)
    log_debug(5, "entitlements_base = %s" % entitlements_base)
    log_debug(5, "entitlements_extra = %s" % entitlements_extra)

    if ks_session_id_token:
        ks_session_id = ks_session_id_token['kickstart_session_id']
    else:
        ks_session_id = None

    # akl: add the entitlements array constructed above to kwargs
    kwargs = {
        'user_id': user_id,
        'org_id': org_id,
        'kickstart_session_id': ks_session_id,
        'entitlements': entitlements_base.keys() + entitlements_extra.keys(),
        'deploy_configs': deploy_configs,
    }
    log_debug(4, "Values", kwargs)

    if rereg_token_found and len(result) > 1:
        log_debug(4, "re-activation stacked with activationkeys")
        kwargs['remove_entitlements'] = entitlements_remove
        return ReRegistrationActivationToken(result, **kwargs)
    elif rereg_token_found:
        log_debug(4, "simple re-activation")
        return ReRegistrationToken([rereg_token_found], **kwargs)

    return ActivationTokens(result, **kwargs)
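A minimal sketch of the "all stacked tokens must belong to the same org" rule enforced above, using plain dicts in place of database rows (common_org_id is a hypothetical helper, not part of the code above).

def common_org_id(token_rows):
    org_id = None
    for row in token_rows:
        token_org = row["org_id"]
        if org_id is not None and org_id != token_org:
            raise ValueError("Tokens from mismatching orgs")
        org_id = token_org
    return org_id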
Example #45
0
    def _querySatelliteForChecksum(self, req):
        """ Sends a HEAD request to the satellite for the purpose of obtaining
            the checksum for the requested resource.  A (status, checksum)
            tuple is returned.  If status is not apache.OK, checksum will be
            None.  If status is OK, and a checksum is not returned, the old
            BZ 158236 behavior will be used.
        """
        scheme = SCHEME_HTTP
        if req.server.port == 443:
            scheme = SCHEME_HTTPS
        log_debug(6, "Using scheme: %s" % scheme)

        # Initiate a HEAD request to the satellite to retrieve the MD5 sum.
        # Actually, we make the request through our own proxy first, so
        # that we don't accidentally bypass necessary authentication
        # routines.  Since it's a HEAD request, the proxy will forward it
        # directly to the satellite like it would a POST request.

        host = "127.0.0.1"
        port = req.connection.local_addr[1]

        connection = self._createConnection(host, port, scheme)
        if not connection:
            # Couldn't form the connection.  Log an error and revert to the
            # old BZ 158236 behavior.  In order to be as robust as possible,
            # we won't fail here.

            log_error('HEAD req - Could not create connection to %s://%s:%s'
                      % (scheme, host, str(port)))
            return (apache.OK, None)

        # We obtained the connection successfully.  Construct the URL that
        # we'll connect to.

        pingURL = "%s://%s:%s%s" % (scheme, host, str(port), req.uri)
        log_debug(6, "Ping URI: %s" % pingURL)

        hdrs = UserDictCase()
        for k in req.headers_in.keys():
            if k.lower() != 'range':  # we want checksum of whole file
                hdrs[k] = req.headers_in[k]

        log_debug(9, "Using existing headers_in", hdrs)
        connection.request("HEAD", pingURL, None, hdrs)
        log_debug(6, "Connection made, awaiting response.")

        # Get the response.

        response = connection.getresponse()
        log_debug(6, "Received response status: %s" % response.status)
        connection.close()

        if (response.status != apache.HTTP_OK) and (response.status != apache.HTTP_PARTIAL_CONTENT):
            # Something bad happened.  Return the status back to the client.

            log_debug(1, "HEAD req - Received error code in reponse: %s"
                      % (str(response.status)))
            return (response.status, None)

        # The request was successful.  Dig the MD5 checksum out of the headers.

        responseHdrs = response.msg
        if not responseHdrs:
            # No headers?!  This shouldn't happen at all.  But if it does,
            # revert to the old BZ 158236 behavior.

            log_error("HEAD response - No HTTP headers!")
            return (apache.OK, None)

        if not responseHdrs.has_key(HEADER_CHECKSUM):
            # No checksum was provided.  This could happen if a newer
            # proxy is talking to an older satellite.  To keep things
            # running smoothly, we'll just revert to the BZ 158236
            # behavior.

            log_debug(1, "HEAD response - No X-RHN-Checksum field provided!")
            return (apache.OK, None)

        checksum = responseHdrs[HEADER_CHECKSUM]

        return (apache.OK, checksum)

    def __redirectToNextLocationNoRetry(self, loopProtection=False):
        """ This function will perform a redirection to the next location, as
            specified in the last response's "Location" header. This function will
            return an actual HTTP response status code.  If successful, it will
            return apache.HTTP_OK, not apache.OK.  If unsuccessful, this function
            will simply return; no retries will be performed.  The following error
            codes can be returned:

            HTTP_OK,HTTP_PARTIAL_CONTENT - Redirect successful.
            HTTP_MOVED_TEMPORARILY     - Redirect was redirected again by 3rd party.
            HTTP_MOVED_PERMANENTLY     - Redirect was redirected again by 3rd party.
            HTTP_INTERNAL_SERVER_ERROR - Error extracting redirect information
            HTTP_SERVICE_UNAVAILABLE   - Could not connect to 3rd party server,
                                         connection was reset, or a read error
                                         occurred during communication.
            HTTP_*                     - Any other HTTP status code may also be
                                         returned.

            Upon successful completion of this function, a new responseContext
            will be created and pushed onto the stack.
        """

        # Obtain the redirect location first before we replace the current
        # response context.  It's contained in the Location header of the
        # previous response.

        redirectLocation = self._get_header(rhnConstants.HEADER_LOCATION)

        # We are about to redirect to a new location so now we'll push a new
        # response context before we return any errors.
        self.responseContext.add()

        # There should always be a redirect URL passed back to us.  If not,
        # there's an error.

        if not redirectLocation:
            log_error("  No redirect location specified!")
            Traceback(mail=0)
            return apache.HTTP_INTERNAL_SERVER_ERROR

        # The _get_header function returns the value as a list.  There should
        # always be exactly one location specified.

        redirectLocation = redirectLocation[0]
        log_debug(1, "  Redirecting to: ", redirectLocation)

        # Tear apart the redirect URL.  We need the scheme, the host, the
        # port (if not the default), and the URI.

        _scheme, host, port, uri = self._parse_url(redirectLocation)

        # Add any params onto the URI since _parse_url doesn't include them.
        if redirectLocation.find('?') > -1:
            uri += redirectLocation[redirectLocation.index('?'):]

        # Now create a new connection.  We'll use SSL if configured to do
        # so.

        params = {
            'host': host,
            'port': port,
        }
        if CFG.has_key('timeout'):
            params['timeout'] = CFG.TIMEOUT
        if CFG.USE_SSL:
            log_debug(1, "  Redirecting with SSL.  Cert= ", self.caChain)
            params['trusted_certs'] = [self.caChain]
            connection = connections.HTTPSConnection(**params)
        else:
            log_debug(1, "  Redirecting withOUT SSL.")
            connection = connections.HTTPConnection(**params)

        # Put the connection into the current response context.
        self.responseContext.setConnection(connection)

        # Now open the connection to the 3rd party server.

        log_debug(4, "Attempting to connect to 3rd party server...")
        try:
            connection.connect()
        except socket.error, e:
            log_error("Error opening redirect connection", redirectLocation, e)
            Traceback(mail=0)
            return apache.HTTP_SERVICE_UNAVAILABLE


class RedirectHandler(SharedHandler):
    """ Spacewalk Proxy SSL Redirect specific handler code called by rhnApache.

        Workflow is:
        Client -> Apache:Broker -> Squid -> Apache:Redirect -> Satellite

        The Redirect handler gets all requests for localhost:80; they come
        from the Broker handler through Squid, which handles caching.
        The Redirect module transforms the destination url to the parent or
        to an http proxy, depending on what we have in CFG.
    """
    def __init__(self, req):
        SharedHandler.__init__(self, req)
        self.componentType = 'proxy.redirect'
        self._initConnectionVariables(req)
        self.rhnParentXMLRPC = None

    def _initConnectionVariables(self, _req):
        """ set connection variables
            NOTE: self.{caChain,rhnParent,httpProxy*} are initialized
                  in SharedHandler
        """

        effectiveURI = self._getEffectiveURI()
        effectiveURI_parts = urlparse(effectiveURI)
        scheme = 'http'
        if CFG.USE_SSL:
            scheme = 'https'
        else:
            self.caChain = ''
        self.rhnParentXMLRPC = urlunparse(
            (scheme, self.rhnParent, '/XMLRPC', '', '', ''))
        self.rhnParent = urlunparse((scheme, self.rhnParent) +
                                    effectiveURI_parts[2:])

        log_debug(3, 'remapped self.rhnParent:       %s' % self.rhnParent)
        log_debug(3,
                  'remapped self.rhnParentXMLRPC: %s' % self.rhnParentXMLRPC)

    def handler(self):
        """ Main handler for all requests pumped through this server. """

        log_debug(4, 'In redirect handler')
        self._prepHandler()

        # Rebuild the X-Forwarded-For header so that it reflects the actual
        # path of the request.  We must do this because squid is unable to
        # determine the "real" client, and will make each entry in the chain
        # 127.0.0.1.
        _oto = rhnFlags.get('outputTransportOptions')
        _oto['X-Forwarded-For'] = _oto['X-RHN-IP-Path']

        self.rhnParent = self.rhnParent or ''  # paranoid

        log_debug(4, 'Connecting to parent...')
        self._connectToParent()  # part 1

        log_debug(4, 'Initiating communication with server...')
        status = self._serverCommo()  # part 2
        if (status != apache.OK) and (status != apache.HTTP_PARTIAL_CONTENT):
            log_debug(3, "Leaving handler with status code %s" % status)
            return status

        log_debug(4, 'Initiating communication with client...')
        # If we got this far, it has to be a good response
        return self._clientCommo(status)

    def _handleServerResponse(self, status):
        """ Here, we'll override the default behavior for handling server responses
            so that we can adequately handle 302's.

            We will follow redirects unless it is a redirect to the (re)login page,
            in which case we change the protocol to https and return the redirect
            to the user.
        """

        # In case of a 302, redirect the original request to the location
        # specified in the response.

        if status == apache.HTTP_MOVED_TEMPORARILY or \
           status == apache.HTTP_MOVED_PERMANENTLY:

            log_debug(1, "Received redirect response: ", status)

            # if we redirected to ssl version of login page, send redirect directly to user
            headers = self.responseContext.getHeaders()
            if headers is not None:
                for headerKey in headers.keys():
                    if headerKey == 'location':
                        location = self._get_header(headerKey)
                        relogin = re.compile(
                            r'https?://.*(/rhn/(Re)?Login.do\?.*)')
                        m = relogin.match(location[0])
                        if m:
                            # pull server name out of "t:o:k:e:n:hostname1,t:o:k:e:n:hostname2,..."
                            proxy_auth = self.req.headers_in[
                                'X-RHN-Proxy-Auth']
                            last_auth = proxy_auth.split(',')[-1]
                            server_name = last_auth.split(':')[-1]
                            log_debug(
                                1, "Redirecting to SSL version of login page")
                            rhnLib.setHeaderValue(
                                self.req.headers_out, 'Location',
                                "https://%s%s" % (server_name, m.group(1)))
                            return apache.HTTP_MOVED_PERMANENTLY

            redirectStatus = self.__redirectToNextLocation()

            # At this point, we've either:
            #
            #     (a) successfully redirected to the 3rd party
            #     (b) been told to redirect somewhere else from the 3rd party
            #     (c) run out of retry attempts
            #
            # We'll keep redirecting until we've received HTTP_OK or an error.

            while redirectStatus == apache.HTTP_MOVED_PERMANENTLY or \
                    redirectStatus == apache.HTTP_MOVED_TEMPORARILY:

                # We've been told to redirect again.  We'll pass a special
                # argument to ensure that if we end up back at the server, we
                # won't be redirected again.

                log_debug(1, "Redirected again!  Code=", redirectStatus)
                redirectStatus = self.__redirectToNextLocation(True)

            if (redirectStatus != apache.HTTP_OK) and (
                    redirectStatus != apache.HTTP_PARTIAL_CONTENT):

                # We must have run out of retry attempts.  Fail over to Hosted
                # to perform the request.

                log_debug(
                    1, "Redirection failed; retries exhausted.  "
                    "Failing over.  Code=", redirectStatus)
                redirectStatus = self.__redirectFailover()

            return SharedHandler._handleServerResponse(self, redirectStatus)

        else:
            # Otherwise, revert to default behavior.
            return SharedHandler._handleServerResponse(self, status)

    def __redirectToNextLocation(self, loopProtection=False):
        """ This function will perform a redirection to the next location, as
            specified in the last response's "Location" header. This function will
            return an actual HTTP response status code.  If successful, it will
            return apache.HTTP_OK, not apache.OK.  If unsuccessful, this function
            will retry a configurable number of times, as defined in
            CFG.NETWORK_RETRIES.  The following codes define "success".

              HTTP_OK
              HTTP_PARTIAL_CONTENT
              HTTP_MOVED_TEMPORARILY
              HTTP_MOVED_PERMANENTLY

            Upon successful completion of this function, the responseContext
            should be populated with the response.

            Arguments:

            loopProtection - If True, this function will insert a special
                           header into the new request that tells the RHN
                           server not to issue another redirect to us, in case
                           that's where we end up being redirected.

            Return:

            This function may return any valid HTTP_* response code.  See
            __redirectToNextLocationNoRetry for more info.
        """
        retriesLeft = CFG.NETWORK_RETRIES

        # We'll now try to redirect to the 3rd party.  We will keep
        # retrying until we exhaust the number of allowed attempts.
        # Valid response codes are:
        #     HTTP_OK
        #     HTTP_PARTIAL_CONTENT
        #     HTTP_MOVED_PERMANENTLY
        #     HTTP_MOVED_TEMPORARILY

        redirectStatus = self.__redirectToNextLocationNoRetry(loopProtection)
        while redirectStatus != apache.HTTP_OK and redirectStatus != apache.HTTP_PARTIAL_CONTENT and \
                        redirectStatus != apache.HTTP_MOVED_PERMANENTLY and \
                        redirectStatus != apache.HTTP_MOVED_TEMPORARILY and retriesLeft > 0:

            retriesLeft = retriesLeft - 1
            log_debug(1, "Redirection failed; trying again.  "
                      "Retries left=", retriesLeft, "Code=", redirectStatus)

            # Pop the current response context and restore the state to
            # the last successful response.  The act of removing the current
            # context will cause all of its open connections to be closed.
            self.responseContext.remove()

            # XXX: Possibly sleep here for a second?
            redirectStatus = \
                self.__redirectToNextLocationNoRetry(loopProtection)

        return redirectStatus

    def __redirectToNextLocationNoRetry(self, loopProtection=False):
        """ This function will perform a redirection to the next location, as
            specified in the last response's "Location" header. This function will
            return an actual HTTP response status code.  If successful, it will
            return apache.HTTP_OK, not apache.OK.  If unsuccessful, this function
            will simply return; no retries will be performed.  The following error
            codes can be returned:

            HTTP_OK, HTTP_PARTIAL_CONTENT - Redirect successful.
            HTTP_MOVED_TEMPORARILY     - Redirect was redirected again by 3rd party.
            HTTP_MOVED_PERMANENTLY     - Redirect was redirected again by 3rd party.
            HTTP_INTERNAL_SERVER_ERROR - Error extracting redirect information
            HTTP_SERVICE_UNAVAILABLE   - Could not connect to 3rd party server,
                                         connection was reset, or a read error
                                         occurred during communication.
            HTTP_*                     - Any other HTTP status code may also be
                                         returned.

            Upon successful completion of this function, a new responseContext
            will be created and pushed onto the stack.
        """

        # Obtain the redirect location first before we replace the current
        # response context.  It's contained in the Location header of the
        # previous response.

        redirectLocation = self._get_header(rhnConstants.HEADER_LOCATION)

        # We are about to redirect to a new location so now we'll push a new
        # response context before we return any errors.
        self.responseContext.add()

        # There should always be a redirect URL passed back to us.  If not,
        # there's an error.

        if not redirectLocation:
            log_error("  No redirect location specified!")
            Traceback(mail=0)
            return apache.HTTP_INTERNAL_SERVER_ERROR

        # The _get_header function returns the value as a list.  There should
        # always be exactly one location specified.

        redirectLocation = redirectLocation[0]
        log_debug(1, "  Redirecting to: ", redirectLocation)

        # Tear apart the redirect URL.  We need the scheme, the host, the
        # port (if not the default), and the URI.

        _scheme, host, port, uri = self._parse_url(redirectLocation)

        # Add any params onto the URI since _parse_url doesn't include them.
        if redirectLocation.find('?') > -1:
            uri += redirectLocation[redirectLocation.index('?'):]

        # Now create a new connection.  We'll use SSL if configured to do
        # so.

        params = {
            'host': host,
            'port': port,
        }
        if CFG.has_key('timeout'):
            params['timeout'] = CFG.TIMEOUT
        if CFG.USE_SSL:
            log_debug(1, "  Redirecting with SSL.  Cert= ", self.caChain)
            params['trusted_certs'] = [self.caChain]
            connection = connections.HTTPSConnection(**params)
        else:
            log_debug(1, "  Redirecting withOUT SSL.")
            connection = connections.HTTPConnection(**params)

        # Put the connection into the current response context.
        self.responseContext.setConnection(connection)

        # Now open the connection to the 3rd party server.

        log_debug(4, "Attempting to connect to 3rd party server...")
        try:
            connection.connect()
        except socket.error, e:
            log_error("Error opening redirect connection", redirectLocation, e)
            Traceback(mail=0)
            return apache.HTTP_SERVICE_UNAVAILABLE
        log_debug(4, "Connected to 3rd party server:",
                  connection.sock.getpeername())

        # Put the request out on the wire.

        response = None
        try:
            # We'll redirect to the URI made in the original request, but with
            # the new server instead.

            log_debug(4, "Making request: ", self.req.method, uri)
            connection.putrequest(self.req.method, uri)

            # Add some custom headers.

            if loopProtection:
                connection.putheader(rhnConstants.HEADER_RHN_REDIRECT, '0')

            log_debug(4, "  Adding original URL header: ", self.rhnParent)
            connection.putheader(rhnConstants.HEADER_RHN_ORIG_LOC,
                                 self.rhnParent)

            # Add all the other headers in the original request in case we
            # need to re-authenticate with Hosted.

            for hdr in self.req.headers_in.keys():
                if hdr.lower().startswith("x-rhn"):
                    connection.putheader(hdr, self.req.headers_in[hdr])
                    log_debug(4, "Passing request header: ", hdr,
                              self.req.headers_in[hdr])

            connection.endheaders()

            response = connection.getresponse()
        except IOError, ioe:
            # Raised by getresponse() if server closes connection on us.
            log_error("Redirect connection reset by peer.", redirectLocation,
                      ioe)
            Traceback(mail=0)

            # The connection is saved in the current response context, and
            # will be closed when the caller pops the context.
            return apache.HTTP_SERVICE_UNAVAILABLE
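
The retry policy documented for __redirectToNextLocation above boils down to a
simple loop.  The following self-contained sketch restates that policy with
made-up stand-ins for the apache.* status constants and CFG.NETWORK_RETRIES; it
is an illustration only, not code from the proxy.

# Simplified illustration of the redirect retry policy: keep calling an attempt
# function until it returns a "success" code or the allowed number of retries
# is used up.  The numeric codes and the retry count are hypothetical stand-ins
# for apache.* constants and CFG.NETWORK_RETRIES.
HTTP_OK, HTTP_PARTIAL_CONTENT = 200, 206
HTTP_MOVED_PERMANENTLY, HTTP_MOVED_TEMPORARILY = 301, 302
SUCCESS_CODES = (HTTP_OK, HTTP_PARTIAL_CONTENT,
                 HTTP_MOVED_PERMANENTLY, HTTP_MOVED_TEMPORARILY)

def redirect_with_retries(attempt, retries=3):
    status = attempt()
    while status not in SUCCESS_CODES and retries > 0:
        retries -= 1
        status = attempt()
    return status

# Example run: fail twice with 503, then succeed with 200.
_responses = iter([503, 503, 200])
print(redirect_with_retries(lambda: next(_responses)))  # prints 200
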
Example #48
0
    def login(self):
        """ Login and fetch new token (proxy token).

            How it works in a nutshell.
            Only the broker component uses this. We perform a xmlrpc request
            to rhn_parent. This occurs outside of the http process we are
            currently working on, so we do it all on our own, making all of
            our own SSL decisions, etc. We use CFG.RHN_PARENT as we always
            bypass the SSL redirect.

            DESIGN NOTES:  what is the proxy auth token?
            -------------------------------------------
            A SUSE Manager Proxy auth token is a token fetched upon login from
            SUSE Manager Server or hosted.

            It has this format:
               'S:U:ST:EO:SIG'
            Where:
               S   = server ID
               U   = username
               ST  = server time
               EO  = expiration offset
               SIG = signature
               H   = hostname (important later)

            Within this function, in the SUSE Manager Proxy Broker, we also tag
            the hostname onto the end of the token. The token as described above
            is enough for authentication purposes, but we also need to identify
            the exact hostname (as the SUSE Manager Proxy sees it). So now the token
            becomes (token:hostname):
               'S:U:ST:EO:SIG:H'

            DESIGN NOTES:  what is X-RHN-Proxy-Auth?
            -------------------------------------------
            This is where we use the auth token beyond SUSE Manager Proxy login
            purposes. This is a header used to track request routes through
            a hierarchy of SUSE Manager Proxies.

            X-RHN-Proxy-Auth is a header that passes proxy authentication
            information around in the form of an ordered list of tokens. This
            list is used to gain information as to how a client request is
            routed throughout an RHN topology.

            Format: 'S1:U1:ST1:EO1:SIG1:H1,S2:U2:ST2:EO2:SIG2:H2,...'
                     |_________1_________| |_________2_________| |__...
                             token                 token
                     where token is really: token:hostname

            The leftmost token is the first token hit by a client request;
            the rightmost token is the last token hit by a client request.

        """
        # pylint: disable=R0915

        log_debug(3)
        server = self.__getXmlrpcServer()
        error = None
        token = None
        # update the systemid/serverid if need be.
        self.__processSystemid()
        # Make up to self.__nRetries attempts to log in
        for _i in range(self.__nRetries):
            try:
                token = server.proxy.login(self.__systemid)
            except (socket.error, socket.sslerror) as e:
                if CFG.HTTP_PROXY:
                    # socket error, check to see if your HTTP proxy is running...
                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    httpProxy, httpProxyPort = CFG.HTTP_PROXY.split(':')
                    try:
                        s.connect((httpProxy, int(httpProxyPort)))
                    except socket.error as e:
                        error = [
                            'socket.error',
                            'HTTP Proxy not running? '
                            '(%s) %s' % (CFG.HTTP_PROXY, e)
                        ]
                        # rather big problem: http proxy not running.
                        log_error("*** ERROR ***: %s" % error[1])
                        Traceback(mail=0)
                    except socket.sslerror as e:
                        error = [
                            'socket.sslerror',
                            '(%s) %s' % (CFG.HTTP_PROXY, e)
                        ]
                        # rather big problem: http proxy not running.
                        log_error("*** ERROR ***: %s" % error[1])
                        Traceback(mail=0)
                    else:
                        error = ['socket', str(e)]
                        log_error(error)
                        Traceback(mail=0)
                else:
                    log_error("Socket error", e)
                    Traceback(mail=0)
                Traceback(mail=1)
                token = None
                time.sleep(.25)
                continue
            except SSL.SSL.Error as e:
                token = None
                error = ['rhn.SSL.SSL.Error', repr(e), str(e)]
                log_error(error)
                Traceback(mail=0)
                time.sleep(.25)
                continue
            except xmlrpclib.ProtocolError as e:
                token = None
                log_error('xmlrpclib.ProtocolError', e)
                time.sleep(.25)
                continue
            except xmlrpclib.Fault as e:
                # Report it through the mail
                # Traceback will try to walk over all the values
                # in each stack frame, and eventually will try to stringify
                # the method object itself
                # This should trick it, since the originator of the exception
                # is this function, instead of a deep call into xmlrpclib
                log_error("%s" % e)
                if e.faultCode == 10000:
                    # re-raise it for the users (outage or "important
                    # message" coming through)
                    raise_with_tb(rhnFault(e.faultCode, e.faultString),
                                  sys.exc_info()[2])
                # ok... it's some other fault
                Traceback("ProxyAuth.login (Fault) - SUSE Manager Proxy not "
                          "able to log in.")
                # And raise a Proxy Error - the server made its point loud and
                # clear
                raise_with_tb(
                    rhnFault(
                        1000,
                        _("SUSE Manager Proxy error (during proxy login). "
                          "Please contact your system administrator.")),
                    sys.exc_info()[2])
            except Exception as e:
                token = None
                log_error("Unhandled exception", e)
                Traceback(mail=0)
                time.sleep(.25)
                continue
            else:
                break

        if not token:
            if error:
                if error[0] in ('xmlrpclib.ProtocolError', 'socket.error',
                                'socket'):
                    raise rhnFault(
                        1000,
                        _("SUSE Manager Proxy error (error: %s). "
                          "Please contact your system administrator.") %
                        error[0])
                if error[0] in ('rhn.SSL.SSL.Error', 'socket.sslerror'):
                    raise rhnFault(
                        1000,
                        _("SUSE Manager Proxy error (SSL issues? Error: %s). "
                          "Please contact your system administrator.") %
                        error[0])
                else:
                    raise rhnFault(1002, err_text='%s' % error[-1])
            else:
                raise rhnFault(1001)
        if self.hostname:
            token = token + ':' + self.hostname
        log_debug(6, "New proxy token: %s" % token)
        return token
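
As a rough illustration of the token and header formats described in the
docstring above, the following self-contained sketch splits an X-RHN-Proxy-Auth
style value into (token, hostname) pairs.  The helper name and the sample
values are invented for the example; the real proxy code uses
splitProxyAuthToken() and related helpers.

# Hypothetical helper, for illustration only: split an X-RHN-Proxy-Auth style
# value ('S:U:ST:EO:SIG:H,S:U:ST:EO:SIG:H,...') into (token, hostname) pairs.
# The field layout follows the docstring above; sample values are made up.
def split_auth_chain(header_value):
    entries = []
    for raw in header_value.split(','):
        parts = raw.strip().split(':')
        token = ':'.join(parts[:5])       # S:U:ST:EO:SIG
        hostname = ':'.join(parts[5:])    # H (empty if the token is untagged)
        entries.append((token, hostname))
    return entries

sample = ('1000010000:admin:1400000000:3600:sig1:proxy1.example.com,'
          '1000010001:admin:1400000100:3600:sig2:proxy2.example.com')
for token, host in split_auth_chain(sample):
    print("%s -> %s" % (host, token))
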
Example #49
0
class Shelf:
    """ Client authenication temp. db.

        Main class that the client side (client to the caching daemon) has to
        instantiate to expose the proper API. Basically, the API is a dictionary.
    """

    # pylint: disable=R0903

    def __init__(self, server_addr):
        log_debug(6, server_addr)
        self.serverAddr = server_addr

    def __request(self, methodname, params):
        # pylint: disable=R0915
        log_debug(6, methodname, params)
        # Init the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        try:
            sock.connect(self.serverAddr)
        except socket.error, e:
            sock.close()
            methodname = None
            log_error("Error connecting to the auth cache: %s" % str(e))
            Traceback("Shelf.__request",
                      extra="""
              Error connecting to the authentication cache daemon.
              Make sure it is started on %s""" % str(self.serverAddr))
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise rhnFault(
                1000,
                _("Spacewalk Proxy error (issues connecting to auth cache). "
                  "Please contact your system administrator")
            ), None, sys.exc_info()[2]

        wfile = sock.makefile("w")

        try:
            send(wfile, methodname, None, *params)
        except CommunicationError:
            wfile.close()
            sock.close()
            Traceback("Shelf.__request",
                      extra="Encountered a CommunicationError")
            raise
        except socket.error, e:
            wfile.close()
            sock.close()
            log_error("Error communicating to the auth cache: %s" % str(e))
            Traceback("Shelf.__request",
                      extra="""\
                     Error sending to the authentication cache daemon.
                     Make sure the authentication cache daemon is started""")
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise rhnFault(
                1000,
                _("Spacewalk Proxy error (issues connecting to auth cache). "
                  "Please contact your system administrator")
            ), None, sys.exc_info()[2]

        wfile.close()

        rfile = sock.makefile("r")
        try:
            params, methodname = recv(rfile)
        except CommunicationError, e:
            log_error(e.faultString)
            rfile.close()
            sock.close()
            log_error("Error communicating to the auth cache: %s" % str(e))
            Traceback("Shelf.__request",
                      extra="""\
                      Error receiving from the authentication cache daemon.
                      Make sure the authentication cache daemon is started""")
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise rhnFault(
                1000,
                _("Spacewalk Proxy error (issues communicating to auth cache). "
                  "Please contact your system administrator")
            ), None, sys.exc_info()[2]
Example #50
0
                      '(%s) %s' % (CFG.HTTP_PROXY, e)]
             # rather big problem: http proxy not running.
             log_error("*** ERROR ***: %s" % error[1])
             Traceback(mail=0)
         except socket.sslerror, e:
             error = ['socket.sslerror',
                      '(%s) %s' % (CFG.HTTP_PROXY, e)]
             # rather big problem: http proxy not running.
             log_error("*** ERROR ***: %s" % error[1])
             Traceback(mail=0)
         else:
             error = ['socket', str(e)]
             log_error(error)
             Traceback(mail=0)
     else:
         log_error("Socket error", e)
         Traceback(mail=0)
     Traceback(mail=1)
     token = None
     time.sleep(.25)
     continue
 except SSL.SSL.Error, e:
     token = None
     error = ['rhn.SSL.SSL.Error', repr(e), str(e)]
     log_error(error)
     Traceback(mail=0)
     time.sleep(.25)
     continue
 except xmlrpclib.ProtocolError, e:
     token = None
     log_error('xmlrpclib.ProtocolError', e)
Example #51
0
    def login(self):
        """ Login and fetch new token (proxy token).

            How it works in a nutshell.
            Only the broker component uses this. We perform a xmlrpc request
            to rhn_parent. This occurs outside of the http process we are
            currently working on, so we do it all on our own, making all of
            our own SSL decisions, etc. We use CFG.RHN_PARENT as we always
            bypass the SSL redirect.

            DESIGN NOTES:  what is the proxy auth token?
            -------------------------------------------
            A Spacewalk Proxy auth token is a token fetched upon login from
            Red Hat Satellite or hosted.

            It has this format:
               'S:U:ST:EO:SIG'
            Where:
               S   = server ID
               U   = username
               ST  = server time
               EO  = expiration offset
               SIG = signature
               H   = hostname (important later)

            Within this function, in the Spacewalk Proxy Broker, we also tag
            the hostname onto the end of the token. The token as described above
            is enough for authentication purposes, but we also need to identify
            the exact hostname (as the Spacewalk Proxy sees it). So now the token
            becomes (token:hostname):
               'S:U:ST:EO:SIG:H'

            DESIGN NOTES:  what is X-RHN-Proxy-Auth?
            -------------------------------------------
            This is where we use the auth token beyond Spacewalk Proxy login
            purposes. This is a header used to track request routes through
            a hierarchy of RHN Proxies.

            X-RHN-Proxy-Auth is a header that passes proxy authentication
            information around in the form of an ordered list of tokens. This
            list is used to gain information as to how a client request is
            routed throughout an RHN topology.

            Format: 'S1:U1:ST1:EO1:SIG1:H1,S2:U2:ST2:EO2:SIG2:H2,...'
                     |_________1_________| |_________2_________| |__...
                             token                 token
                     where token is really: token:hostname

            The leftmost token is the first token hit by a client request;
            the rightmost token is the last token hit by a client request.

        """
        # pylint: disable=R0915

        log_debug(3)
        server = self.__getXmlrpcServer()
        error = None
        token = None
        # update the systemid/serverid if need be.
        self.__processSystemid()
        # Make up to self.__nRetries attempts to log in
        for _i in range(self.__nRetries):
            try:
                token = server.proxy.login(self.__systemid)
            except (socket.error, socket.sslerror), e:
                if CFG.HTTP_PROXY:
                    # socket error, check to see if your HTTP proxy is running...
                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    httpProxy, httpProxyPort = CFG.HTTP_PROXY.split(':')
                    try:
                        s.connect((httpProxy, int(httpProxyPort)))
                    except socket.error, e:
                        error = ['socket.error', 'HTTP Proxy not running? '
                                 '(%s) %s' % (CFG.HTTP_PROXY, e)]
                        # rather big problem: http proxy not running.
                        log_error("*** ERROR ***: %s" % error[1])
                        Traceback(mail=0)
                    except socket.sslerror, e:
                        error = ['socket.sslerror',
                                 '(%s) %s' % (CFG.HTTP_PROXY, e)]
                        # rather big problem: http proxy not running.
                        log_error("*** ERROR ***: %s" % error[1])
                        Traceback(mail=0)
                    else:
                        error = ['socket', str(e)]
                        log_error(error)
                        Traceback(mail=0)
Example #52
0
            connection.endheaders()

            response = connection.getresponse()
        except IOError, ioe:
            # Raised by getresponse() if server closes connection on us.
            log_error("Redirect connection reset by peer.", redirectLocation,
                      ioe)
            Traceback(mail=0)

            # The connection is saved in the current response context, and
            # will be closed when the caller pops the context.
            return apache.HTTP_SERVICE_UNAVAILABLE

        except socket.error, se:
            # Some socket error occurred.  Possibly a read error.
            log_error("Redirect request failed.", redirectLocation, se)
            Traceback(mail=0)

            # The connection is saved in the current response context, and
            # will be closed when the caller pops the context.
            return apache.HTTP_SERVICE_UNAVAILABLE

        # Save the response headers and body FD in the current communication
        # context.

        self.responseContext.setBodyFd(response)
        self.responseContext.setHeaders(response.msg)

        log_debug(4, "Response headers: ",
                  self.responseContext.getHeaders().items())
        log_debug(4, "Got redirect response.  Status=", response.status)
Example #53
0
    def get(self, system_id, version=1, status={}):
        # Authenticate the system certificate
        if CFG.DISABLE_CHECKINS:
            self.update_checkin = 0
        else:
            self.update_checkin = 1
        self.auth_system(system_id)
        log_debug(1, self.server_id, version,
                  "checkins %s" % ["disabled", "enabled"][self.update_checkin])
        if status:
            self.__update_status(status)

        # Update the capabilities list
        rhnCapability.update_client_capabilities(self.server_id)

        # Invalidate failed actions
        self._invalidate_failed_prereq_actions()

        server_locked = self.server.server_locked()
        log_debug(3, "Server locked", server_locked)

        if self.__reboot_in_progress():
            log_debug(3, "Server reboot in progress", self.server_id)
            rhnSQL.commit()
            return ""

        ret = {}
        # get the action. Status codes are currently:
        # 0 Queued # 1 Picked Up # 2 Completed # 3 Failed
        # XXX: we should really be using labels from rhnActionType instead of
        #      hard coded type id numbers.
        # We fetch actions whose prerequisites have completed, and actions
        # that don't have prerequisites at all
        h = rhnSQL.prepare(self._query_queue_get)

        should_execute = 1

        # Loop to get a valid action
        # (only one valid action will be dealt with per execution of this function...)
        while 1:
            if should_execute:
                h.execute(server_id=self.server_id)
                should_execute = 0

            # Okay, got an action
            action = h.fetchone_dict()
            if not action:  # No actions available; bail out
                # Don't forget the commit at the end...
                ret = ""
                break
            action_id = action['id']
            log_debug(4, "Checking action %s" % action_id)
            # okay, now we have the action - process it.
            if action['remaining_tries'] < 1:
                log_debug(4, "Action %s picked up too many times" % action_id)
                # We've run out of pickup attempts for this action...
                self.__update_action(
                    action_id,
                    status=3,
                    message="This action has been picked up multiple times "
                    "without a successful transaction; "
                    "this action is now failed for this system.")
                # Invalidate actions that depend on this one
                self._invalidate_child_actions(action_id)
                # keep looking for a good action to process...
                continue

            if server_locked and action['unlocked_only'] == 'Y':
                # This action is locked
                log_debug(
                    4, "server id %s locked for action id %s" %
                    (self.server_id, action_id))
                continue

            try:
                if version == 1:
                    ret = self.__getV1(action)
                else:
                    ret = self.__getV2(action)
            except ShadowAction:  # Action the client should not see
                e = sys.exc_info()[1]
                # Make sure we re-execute the query, so we pick up whatever
                # extra actions were added
                should_execute = 1
                text = e.args[0]
                log_debug(4, "Shadow Action", text)
                self.__update_action(action['id'], 2, 0, text)
                continue
            except InvalidAction:  # This is an invalid action
                e = sys.exc_info()[1]
                # Update its status so it won't bother us again
                text = e.args[0]
                log_debug(4, "Invalid Action", text)
                self.__update_action(action['id'], 3, -99, text)
                continue
            except EmptyAction:
                e = sys.exc_info()[1]
                # this means that we have some sort of internal error
                # which gets reported in the logs. We don't touch the
                # action because this should get fixed on our side.
                log_error("Can not process action data", action, e.args)
                ret = ""
                break
            else:  # all fine
                # Update the status of the action
                h = rhnSQL.prepare("""
                update rhnServerAction
                    set status = 1,
                        pickup_time = current_timestamp,
                        remaining_tries = :tries - 1
                where action_id = :action_id
                  and server_id = :server_id
                """)
                h.execute(action_id=action["id"],
                          server_id=self.server_id,
                          tries=action["remaining_tries"])
                break

        # commit all changes
        rhnSQL.commit()

        return ret
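
For reference, the action status codes mentioned in the comments above map to
the following informal labels; this small table is reconstructed from those
inline comments, not taken from rhnActionType or any actual constants module.

# Informal mapping of the rhnServerAction status codes used above; labels come
# from the inline comments, not from the Spacewalk code base.
ACTION_STATUS = {
    0: 'Queued',
    1: 'Picked Up',   # set by the pickup UPDATE at the end of the loop
    2: 'Completed',   # e.g. __update_action(action_id, 2, ...) for shadow actions
    3: 'Failed',      # e.g. too many pickups or an invalid action
}
print(ACTION_STATUS[1])
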
Example #54
0
 def repl_func(self, match_object):
     try:
         return self._repl_func(match_object)
     except ValueError, e:
         log_error("cfg variable interpolation error", e)
         return match_object.group()
Example #55
0
 def __init__(self, msg):
     self.msg = msg
     log_error(msg)
Example #56
0
    def __checkAuthSessionTokenCache(self, token, channel):
        """ Authentication / authorize the channel """

        log_debug(2, token, channel)
        self.clientServerId = token['X-RHN-Server-ID']

        cachedToken = self.proxyAuth.get_client_token(self.clientServerId)
        if not cachedToken:
            # maybe client logged in through different load-balanced proxy
            # try to update the cache and try again
            cachedToken = self.proxyAuth.update_client_token_if_valid(
                self.clientServerId, token)

            if not cachedToken:
                msg = _("Invalid session key - server ID not found in cache: %s") \
                        % self.clientServerId
                log_error(msg)
                raise rhnFault(33, msg)

        self.cachedClientInfo = UserDictCase(cachedToken)

        clockSkew = self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]
        del self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]

        # Add the server id
        self.authChannels = self.cachedClientInfo['X-RHN-Auth-Channels']
        del self.cachedClientInfo['X-RHN-Auth-Channels']
        self.cachedClientInfo['X-RHN-Server-ID'] = self.clientServerId
        log_debug(4, 'Retrieved token from cache: %s' % self.cachedClientInfo)

        # Compare the two things
        if not _dictEquals(token, self.cachedClientInfo,
                           ['X-RHN-Auth-Channels']):
            # Maybe the client logged in through a different load-balanced
            # proxy? Check validity of the token the client passed us.
            updatedToken = self.proxyAuth.update_client_token_if_valid(
                self.clientServerId, token)
            # fix up the updated token the same way we did above
            if updatedToken:
                self.cachedClientInfo = UserDictCase(updatedToken)
                clockSkew = self.cachedClientInfo[
                    "X-RHN-Auth-Proxy-Clock-Skew"]
                del self.cachedClientInfo["X-RHN-Auth-Proxy-Clock-Skew"]
                self.authChannels = self.cachedClientInfo[
                    'X-RHN-Auth-Channels']
                del self.cachedClientInfo['X-RHN-Auth-Channels']
                self.cachedClientInfo['X-RHN-Server-ID'] = \
                        self.clientServerId
                log_debug(
                    4,
                    'Retrieved token from cache: %s' % self.cachedClientInfo)

            if not updatedToken or not _dictEquals(
                    token, self.cachedClientInfo, ['X-RHN-Auth-Channels']):
                log_debug(3, "Session tokens different")
                raise rhnFault(33)  # Invalid session key

        # Check the expiration
        serverTime = float(token['X-RHN-Auth-Server-Time'])
        offset = float(token['X-RHN-Auth-Expire-Offset'])
        if time.time() > serverTime + offset + clockSkew:
            log_debug(3, "Session token has expired")
            raise rhnFault(34)  # Session key has expired

        # Only authorized channels are the ones stored in the cache.
        authChannels = [x[0] for x in self.authChannels]
        log_debug(4, "Auth channels: '%s'" % authChannels)
        # Check the authorization
        if channel not in authChannels:
            log_debug(4,
                      "Not subscribed to channel %s; unauthorized" % channel)
            raise rhnFault(35, _('Unauthorized channel access requested.'))
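
The expiration test above is a single comparison; the following self-contained
sketch restates it with made-up numbers so the arithmetic is easy to check.

import time

# Minimal sketch of the expiry rule used above: a session token expires once
# the server-side issue time plus the expiration offset (both carried in the
# token) plus the recorded clock skew lies in the past.  The numbers below are
# arbitrary examples.
def token_expired(server_time, expire_offset, clock_skew, now=None):
    now = time.time() if now is None else now
    return now > float(server_time) + float(expire_offset) + float(clock_skew)

print(token_expired('1400000000', '3600', 0.0, now=1400003601.0))  # True (expired)
print(token_expired('1400000000', '3600', 0.0, now=1400001800.0))  # False (still valid)
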
Example #57
0
def verify(server_id, action_id, data={}):
    log_debug(3, action_id)

    if not data or not data.has_key('verify_info'):
        # some data should have been passed back...
        log_error("Insufficient package verify information returned",
                  server_id, action_id, data)
        return

    log_debug(4, "pkg verify data", data)

    # Remove old results
    h = rhnSQL.prepare(_query_delete_verify_results)
    h.execute(server_id=server_id, action_id=action_id)

    h = rhnSQL.prepare(_query_delete_verify_missing)
    h.execute(server_id=server_id, action_id=action_id)

    attrib_tests = ['S', 'M', '5', 'D', 'L', 'U', 'G', 'T']

    # Store the values for executemany() for the attribute-failures
    verify_attribs = {
        'server_id': [],
        'action_id': [],
        'package_name': [],
        'epoch': [],
        'version': [],
        'release': [],
        'arch': [],
        'filename': [],
        'attrib': [],
    }
    for test in attrib_tests:
        verify_attribs["test_" + test] = []

    # Store the "missing xxxx" results for executemany()
    missing_files = {
        'server_id': [],
        'action_id': [],
        'package_name': [],
        'epoch': [],
        'version': [],
        'release': [],
        'arch': [],
        'filename': []
    }

    # Uniquify the packages
    uq_packages = {}

    for package_spec, responses in data['verify_info']:
        package_spec = list(package_spec)
        # Fix the epoch
        if package_spec[3] == '':
            package_spec[3] = None
        package_spec = tuple(package_spec)
        if uq_packages.has_key(package_spec):
            # Been here already
            continue

        # We need to uniquify the file names within a package too
        hash = {}
        for response in responses:
            try:
                dict = _parse_response_line(response, attrib_tests)
            except InvalidResponseLine:
                log_error("packages.verify: (%s, %s): invalid line %s" %
                          (server_id, action_id, response))
                continue

            hash[dict['filename']] = dict

        # Add the rest of the variables to the dictionaries
        for filename, dict in hash.items():
            dict['server_id'] = server_id
            dict['action_id'] = action_id

            dict['package_name'] = package_spec[0]
            dict['version'] = package_spec[1]
            dict['release'] = package_spec[2]
            dict['epoch'] = package_spec[3]
            dict['arch'] = package_spec[4]

            if not dict.has_key('missing'):
                _hash_append(verify_attribs, dict)
            else:
                _hash_append(missing_files, dict)

        # This package was visited, store it
        uq_packages[package_spec] = None

    if verify_attribs['action_id']:
        h = rhnSQL.prepare(_query_insert_attribute_verify_results)
        h.executemany(**verify_attribs)

    if missing_files['action_id']:
        h = rhnSQL.prepare(_query_insert_missing_verify_results)
        h.executemany(**missing_files)

    rhnSQL.commit()
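
_parse_response_line() is not shown in this excerpt.  As a hedged illustration
of the kind of data it handles, the sketch below parses rpm -V style verify
lines, where each of the test letters S M 5 D L U G T is either present (the
test failed) or replaced by a dot; the field layout and the returned keys here
are assumptions for the example, not the actual implementation.

# Hedged sketch only: one plausible reading of an 'rpm -V' style verify line.
# Each test letter (S M 5 D L U G T) is either present (test failed) or '.'
# (test passed); 'missing <file>' lines report files that no longer exist.
ATTRIB_TESTS = ['S', 'M', '5', 'D', 'L', 'U', 'G', 'T']

def parse_verify_line(line):
    parts = line.split()
    if parts[0] == 'missing':
        return {'filename': parts[-1], 'missing': 1}
    result = {'filename': parts[-1], 'attrib': parts[0]}
    for test, flag in zip(ATTRIB_TESTS, parts[0]):
        result['test_' + test] = 1 if flag == test else 0
    return result

print(parse_verify_line('S.5....T.  c /etc/httpd/conf/httpd.conf'))
print(parse_verify_line('missing     /etc/sysconfig/example'))
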
Example #58
0
def token_channels(server, server_arch, tokens_obj):
    """ Handle channel subscriptions for the registration token """
    assert (isinstance(tokens_obj, ActivationTokens))
    log_debug(1, "YYY token_channels", tokens_obj)

    server_id, server_arch_id = server['id'], server['server_arch_id']

    # what channels are associated with this token (filter only those
    # compatible with this server)
    h = rhnSQL.prepare("""
    select
        rtc.channel_id id, c.name, c.label, c.parent_channel
    from
        rhnRegTokenChannels rtc,
        rhnChannel c,
        rhnServerChannelArchCompat scac
    where rtc.token_id = :token_id
        and rtc.channel_id = c.id
        and c.channel_arch_id = scac.channel_arch_id
        and scac.server_arch_id = :server_arch_id
    """)

    chash = {}
    base_channel_token = None
    base_channel_id = None

    for token in tokens_obj.tokens:
        token_id = token['token_id']
        h.execute(token_id=token_id, server_arch_id=server_arch_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            channel_id = row['id']
            chash[channel_id] = row
            if row['parent_channel'] is not None:
                # Not a base channel
                continue

            # We only allow for one base channel
            if base_channel_id is not None and channel_id != base_channel_id:
                # Base channels conflict - are they coming from the same
                # token?
                if base_channel_token == token:
                    log_error("Token has multiple base channels", token_id,
                              base_channel_id)
                    raise rhnFault(
                        62,
                        _("Token `%s' has more than one base channel assigned")
                        % token['note'])
                raise rhnFault(63, _("Conflicting base channels"))
            base_channel_id = channel_id
            base_channel_token = token

    bc = chash.get(base_channel_id)
    log_debug(4, "base channel", bc)

    # get the base channel for this server
    # Note that we are hitting this codepath after newserver.__save() has been
    # run, which means we've already chosen a base channel
    # from rhnDistChannelMap
    sbc = rhnChannel.get_base_channel(server_id, none_ok=1)

    # prepare the return value
    ret = []

    # now try to figure out which base channel we prefer
    if bc is None:
        if sbc is None:
            # we need at least one base channel definition
            log_error(
                "Server has invalid release and "
                "token contains no base channels", server_id,
                tokens_obj.tokens)
            ret.append("System registered without a base channel")
            ret.append("Unsupported release-architecture combination "
                       "(%s, %s)" % (server["release"], server_arch))
            return ret
    else:  # do we need to drop the one from sbc?
        if sbc and sbc["id"] != bc["id"]:  # we need to prefer the token one
            # unsubscribe from old channel(s)
            rhnChannel.unsubscribe_all_channels(server_id)
            sbc = None  # force true on the next test
        if sbc is None:
            # no base channel subscription at this point
            rhnChannel.subscribe_sql(server_id, bc["id"], commit=0)
            ret.append("Subscribed to base channel '%s' (%s)" %
                       (bc["name"], bc["label"]))
            sbc = bc

    # attempt to subscribe all non-base channels associated with this
    # token
    subscribe_channel = rhnSQL.Procedure("rhn_channel.subscribe_server")
    # Use a set here to ensure uniqueness of the
    # channel family ids used in the loop below.
    channel_family_ids = set()

    for c in [a for a in list(chash.values()) if a["parent_channel"]]:
        # make sure this channel has the right parent
        if str(c["parent_channel"]) != str(sbc["id"]):
            ret.append("NOT subscribed to channel '%s' "
                       "(not a child of '%s')" % (c["name"], sbc["name"]))
            continue
        try:
            # don't run the EC yet
            # XXX: test return code when this one will start returning
            # a status
            subscribe_channel(server_id, c["id"], 0, None)
            child = rhnChannel.Channel()
            child.load_by_id(c["id"])
            child._load_channel_families()
            cfamid = child._channel_families[0]
            channel_family_ids.add(cfamid)
        except rhnSQL.SQLError:
            e = sys.exc_info()[1]
            log_error("Failed channel subscription", server_id, c["id"],
                      c["label"], c["name"])
            ret.append("FAILED to subscribe to channel '%s'" % c["name"])
        else:
            ret.append("Subscribed to channel '%s'" % c["name"])

    log_debug(5, "cf ids: %s" % str(channel_family_ids))
    log_debug(5, "Server org_id: %s" % str(server['org_id']))

    return ret
Example #59
0
def _check_dep(server_id, action_id, failed_dep):
    log_debug(5, failed_dep)
    if not failed_dep:
        return
    if not isinstance(failed_dep, ListType):
        # Not the right format
        log_error("action_extra_data.packages.remove: server %s, action %s: "
                  "failed dep type error: %s" %
                  (server_id, action_id, type(failed_dep)))
        raise InvalidDep

    # This is boring, but somebody's got to do it
    if len(failed_dep) < 5:
        log_error("action_extra_data.packages.remove: server %s, action %s: "
                  "failed dep: not enough entries: %s" %
                  (server_id, action_id, len(failed_dep)))
        raise InvalidDep

    pkg, needs_pkg, flags, suggested, sense = failed_dep[:5]

    if not isinstance(pkg, ListType) or len(pkg) < 3:
        log_error("action_extra_data.packages.remove: server %s, action %s: "
                  "failed dep: bad package spec %s (type %s, len %s)" %
                  (server_id, action_id, pkg, type(pkg), len(pkg)))
        raise InvalidDep
    pkg = map(str, pkg[:3])

    if not isinstance(needs_pkg, ListType) or len(needs_pkg) < 2:
        log_error(
            "action_extra_data.packages.remove: server %s, action %s: "
            "failed dep: bad needs package spec %s (type %s, len %s)" %
            (server_id, action_id, needs_pkg, type(needs_pkg), len(needs_pkg)))
        raise InvalidDep
    needs_pkg = map(str, needs_pkg[:2])

    if not isinstance(flags, IntType):
        log_error("action_extra_data.packages.remove: server %s, action %s: "
                  "failed dep: bad flags type %s" %
                  (server_id, action_id, type(flags)))
        raise InvalidDep

    if not isinstance(sense, IntType):
        log_error("action_extra_data.packages.remove: server %s, action %s: "
                  "failed dep: bad sense type %s" %
                  (server_id, action_id, type(sense)))
        raise InvalidDep

    return pkg, needs_pkg, flags, str(suggested), sense
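
To make the shape of the input concrete, here is a made-up failed_dep entry in
the layout that _check_dep() unpacks above; the package names and numeric
values are purely illustrative.

# Illustrative data only: a failed dependency entry in the shape that
# _check_dep() expects -- [pkg, needs_pkg, flags, suggested, sense] --
# populated with invented values.
failed_dep = [
    ['example-lib', '1.0', '3'],        # pkg: name, version, release (>= 3 entries)
    ['example-app', '2.4'],             # needs_pkg: name, version (>= 2 entries)
    0,                                  # flags (integer)
    'example-lib-1.2-1.x86_64',         # suggested package
    8,                                  # sense (integer)
]
pkg, needs_pkg, flags, suggested, sense = failed_dep[:5]
print("%s needs %s (flags=%d, sense=%d, suggested=%s)"
      % (pkg[0], needs_pkg[0], flags, sense, suggested))
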
Example #60
0
    def __db_update_domain(self, host_id, uuid, properties, existing_row):

        # First, update the rhnVirtualInstance table.  If a guest domain was
        # registered but its host was not, it is possible that the
        # rhnVirtualInstance table's host_system_id column is null.  We'll
        # update that now, if need be.

        new_values_array = []
        bindings = {}

        if not existing_row.get('confirmed'):
            new_values_array.append('confirmed=1')

        if existing_row['host_system_id'] != host_id:
            new_values_array.append('host_system_id=:host_id')
            bindings['host_id'] = host_id

        # Only touch the database if something changed.
        if new_values_array:
            new_values = ', '.join(new_values_array)

            bindings['row_id'] = existing_row['rvi_id']

            update_sql = """
                UPDATE rhnVirtualInstance SET %s WHERE id=:row_id
            """ % (new_values)
            query = rhnSQL.prepare(update_sql)

            try:
                query.execute(**bindings)
            except rhnSQL.SQLError:
                e = sys.exc_info()[1]
                log_error(str(e))
                raise_with_tb(VirtualizationEventError(str(e)),
                              sys.exc_info()[2])

        # Now update the rhnVirtualInstanceInfo table.

        new_values_array = []
        bindings = {}

        if PropertyType.NAME in properties and \
           existing_row['name'] != properties[PropertyType.NAME]:
            new_values_array.append('name=:name')
            bindings['name'] = properties[PropertyType.NAME]

        if PropertyType.VCPUS in properties and \
           existing_row['vcpus'] != properties[PropertyType.VCPUS]:
            new_values_array.append('vcpus=:vcpus')
            bindings['vcpus'] = properties[PropertyType.VCPUS]

        if PropertyType.MEMORY in properties and \
           existing_row['memory_size_k'] != properties[PropertyType.MEMORY]:
            new_values_array.append('memory_size_k=:memory')
            bindings['memory'] = properties[PropertyType.MEMORY]

        if PropertyType.TYPE in properties and \
           existing_row['instance_type'] != properties[PropertyType.TYPE]:
            new_values_array.append("""
                instance_type = (
                    select rvit.id
                    from rhnVirtualInstanceType rvit
                    where rvit.label = :virt_type)
            """)
            bindings['virt_type'] = properties[PropertyType.TYPE]

        if PropertyType.STATE in properties and \
           existing_row['state'] != properties[PropertyType.STATE]:
            new_values_array.append("""
                state = (
                    SELECT rvis.id
                    FROM rhnVirtualInstanceState rvis
                    WHERE rvis.label = :state)
            """)
            bindings['state'] = properties[PropertyType.STATE]

        # Only touch the database if something changed.
        if new_values_array:
            new_values = ', '.join(new_values_array)

            bindings['row_id'] = existing_row['instance_id']

            update_sql = """
                UPDATE rhnVirtualInstanceInfo SET %s WHERE instance_id=:row_id
            """ % (new_values)
            query = rhnSQL.prepare(update_sql)
            query.execute(**bindings)