Example #1
    def response(self, response):
        # Send the xml-rpc response back
        log_debug(3, type(response))
        needs_xmlrpc_encoding = not rhnFlags.test("XMLRPC-Encoded-Response")
        compress_response = rhnFlags.test("compress_response")
        # Init an output object; we'll use it for sending data in various
        # formats
        if isinstance(response, transports.File):
            if not hasattr(response.file_obj, 'fileno') and compress_response:
                # This is a StringIO that has to be compressed, so read it in
                # memory; mark that we don't have to do any xmlrpc encoding
                response = response.file_obj.read()
                needs_xmlrpc_encoding = 0
            else:
                # Just treat it as a file
                return self.response_file(response)

        output = transports.Output()

        # First, use the same encoding/transfer that the client used
        output.set_transport_flags(
            transfer=transports.lookupTransfer(self.input.transfer),
            encoding=transports.lookupEncoding(self.input.encoding))

        if isinstance(response, xmlrpclib.Fault):
            log_debug(4, "Return FAULT", response.faultCode,
                      response.faultString)
            # No compression for faults because we'd like them to pop
            # up in clear text on the other side just in case
            output.set_transport_flags(output.TRANSFER_NONE,
                                       output.ENCODE_NONE)
        elif compress_response:
            # check if we have to compress this result
            log_debug(4, "Compression on for client version", self.client)
            if self.client > 0:
                output.set_transport_flags(output.TRANSFER_BINARY,
                                           output.ENCODE_ZLIB)
            else:  # original clients had the binary transport support broken
                output.set_transport_flags(output.TRANSFER_BASE64,
                                           output.ENCODE_ZLIB)

        # We simply add the transport options to the output headers
        output.headers.update(rhnFlags.get('outputTransportOptions').dict())

        if needs_xmlrpc_encoding:
            # Normalize the response
            response = self.normalize(response)
            try:
                response = xmlrpclib.dumps(response, methodresponse=1)
            except TypeError, e:
                log_debug(
                    4, "Error \"%s\" encoding response = %s" % (e, response))
                Traceback("apacheHandler.response",
                          self.req,
                          extra="Error \"%s\" encoding response = %s" %
                          (e, response),
                          severity="notification")
                return apache.HTTP_INTERNAL_SERVER_ERROR
            except:
                # Uncaught exception; signal the error
                Traceback("apacheHandler.response", self.req,
                          severity="unhandled")
                return apache.HTTP_INTERNAL_SERVER_ERROR
Example #2
    def response(self, response):
        # Send the xml-rpc response back
        log_debug(3, type(response))
        needs_xmlrpc_encoding = not rhnFlags.test("XMLRPC-Encoded-Response")
        compress_response = rhnFlags.test("compress_response")
        # Init an output object; we'll use it for sending data in various
        # formats
        if isinstance(response, transports.File):
            if not hasattr(response.file_obj, "fileno") and compress_response:
                # This is a StringIO that has to be compressed, so read it in
                # memory; mark that we don't have to do any xmlrpc encoding
                response = response.file_obj.read()
                needs_xmlrpc_encoding = 0
            else:
                # Just treat it as a file
                return self.response_file(response)

        output = transports.Output()

        # First, use the same encoding/transfer that the client used
        output.set_transport_flags(
            transfer=transports.lookupTransfer(self.input.transfer),
            encoding=transports.lookupEncoding(self.input.encoding),
        )

        if isinstance(response, xmlrpclib.Fault):
            log_debug(4, "Return FAULT", response.faultCode, response.faultString)
            # No compression for faults because we'd like them to pop
            # up in clear text on the other side just in case
            output.set_transport_flags(output.TRANSFER_NONE, output.ENCODE_NONE)
        elif compress_response:
            # check if we have to compress this result
            log_debug(4, "Compression on for client version", self.client)
            if self.client > 0:
                output.set_transport_flags(output.TRANSFER_BINARY, output.ENCODE_ZLIB)
            else:  # original clients had the binary transport support broken
                output.set_transport_flags(output.TRANSFER_BASE64, output.ENCODE_ZLIB)

        # We simply add the transport options to the output headers
        output.headers.update(rhnFlags.get("outputTransportOptions").dict())

        if needs_xmlrpc_encoding:
            # Normalize the response
            response = self.normalize(response)
            try:
                response = xmlrpclib.dumps(response, methodresponse=1)
            except TypeError, e:
                log_debug(4, 'Error "%s" encoding response = %s' % (e, response))
                Traceback(
                    "apacheHandler.response",
                    self.req,
                    extra='Error "%s" encoding response = %s' % (e, response),
                    severity="notification",
                )
                return apache.HTTP_INTERNAL_SERVER_ERROR
            except:
                # Uncaught exception; signal the error
                Traceback("apacheHandler.response", self.req, severity="unhandled")
                return apache.HTTP_INTERNAL_SERVER_ERROR
Example #3
    def handler(self, req):
        """ Main handler to handle all requests pumped through this server. """

        ret = rhnApache.handler(self, req)
        if ret != apache.OK:
            return ret

        log_debug(4, "METHOD", req.method)
        log_debug(4, "PATH_INFO", req.path_info)
        log_debug(4, "URI (full path info)", req.uri)
        log_debug(4, "Component", self._component)

        if self._component == COMPONENT_BROKER:
            from broker import rhnBroker
            handlerObj = rhnBroker.BrokerHandler(req)
        else:
            # Redirect
            from redirect import rhnRedirect
            handlerObj = rhnRedirect.RedirectHandler(req)

        try:
            ret = handlerObj.handler()
        except rhnFault as e:
            return self.response(req, e)

        if rhnFlags.test("NeedEncoding"):
            return self.response(req, ret)

        # All good; we expect ret to be an HTTP return code
        if not isinstance(ret, type(1)):
            raise rhnException("Invalid status code type %s" % type(ret))
        log_debug(1, "Leaving with status code %s" % ret)
        return ret
Example #4
def auth_proxy():
    """ Authenticates a proxy carrying a clients request. For a valid or
        unsigned request, this function returns 1 (OK), otherwise it raises
        rhnFault

        NOTE: X-RHN-Proxy-Auth described in proxy/broker/rhnProxyAuth.py
    """

    log_debug(3)
    headers = rhnFlags.get('outputTransportOptions')
    if not rhnFlags.test('X-RHN-Proxy-Auth'):
        # No auth information; decline any action
        log_debug(4, "declined proxy authentication")
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (
            1003, _("declined proxy authentication"))
        raise rhnFault(1003)  # Invalid session key

    # NOTE:
    #   - < v3.1 RHN proxies send only 1 token in this header
    #   - > v3.1: we send the route of the requests via multiple tokens
    #     "token1:hostname1,token2:hostname2" the first tuple is the first
    #     proxy hit.

    tokens = string.split(rhnFlags.get('X-RHN-Proxy-Auth'), ',')
    tokens = [token for token in tokens if token]

    for auth_token in tokens:
        _verifyProxyAuthToken(auth_token)

    # if no rhnFault was raised then the tokens all passed
    return 1
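
Each element of the comma-separated X-RHN-Proxy-Auth header above is one proxy hop, and for v3.1+ proxies it carries a trailing hostname ("token1:hostname1,token2:hostname2"). A rough, illustrative sketch of pulling those hops apart (the real parsing is done by apacheAuth.splitProxyAuthToken(), and the exact token layout is an assumption here):

def split_proxy_route(header_value):
    """ Illustration only: split "token1:hostname1,token2:hostname2"
        into (token, hostname) pairs; pre-3.1 proxies send a bare token
        with no hostname. """
    hops = []
    for element in header_value.split(','):
        element = element.strip()
        if not element:
            continue
        if ':' in element:
            token, hostname = element.rsplit(':', 1)
        else:
            token, hostname = element, None
        hops.append((token, hostname))
    return hops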
Example #5
def auth_proxy():
    """ Authenticates a proxy carrying a clients request. For a valid or
        unsigned request, this function returns 1 (OK), otherwise it raises
        rhnFault

        NOTE: X-RHN-Proxy-Auth described in proxy/broker/rhnProxyAuth.py
    """

    log_debug(3)
    headers = rhnFlags.get('outputTransportOptions')
    if not rhnFlags.test('X-RHN-Proxy-Auth'):
        # No auth information; decline any action
        log_debug(4, "declined proxy authentication")
        headers['X-RHN-Proxy-Auth-Error'] = '%s:%s' % (
            1003, _("declined proxy authentication"))
        raise rhnFault(1003)  # Invalid session key

    # NOTE:
    #   - < v3.1 RHN proxies send only 1 token in this header
    #   - > v3.1: we send the route of the requests via multiple tokens
    #     "token1:hostname1,token2:hostname2" the first tuple is the first
    #     proxy hit.

    tokens = string.split(rhnFlags.get('X-RHN-Proxy-Auth'), ',')
    tokens = filter(lambda token: token, tokens)

    for auth_token in tokens:
        _verifyProxyAuthToken(auth_token)

    # if no rhnFault was raised then the tokens all passed
    return 1
Example #6
def process_token(server, server_arch, tokens_obj, virt_type=None):
    """ perform registration tasks for a server as indicated by a token """
    assert (isinstance(tokens_obj, ActivationTokens))
    server_id = server['id']
    log_debug(1, server_id, tokens_obj.get_names())

    # Keep track of what we're doing
    history = {}

    # the tokens are confirmed, mark this server as using it and make
    # sure we're within limits
    check_token_limits(server_id, tokens_obj)

    is_reactivation = rhnFlags.test('re_registration_token')

    if is_reactivation:
        # If it's a re-registration, the server is already entitled
        history[
            "entitlement"] = "Re-activation: keeping previous entitlement level"
    else:
        tokens_obj.entitle(server_id, history, virt_type)

    # channels
    history["channels"] = token_channels(server, server_arch, tokens_obj)

    is_provisioning_entitled = None
    is_management_entitled = None

    if tokens_obj.has_entitlement_label('provisioning_entitled'):
        is_provisioning_entitled = 1

    if tokens_obj.has_entitlement_label('enterprise_entitled'):
        is_management_entitled = 1

    if is_reactivation:
        history["groups"] = ["Re-activation: keeping previous server groups"]
    else:
        # server groups - allowed for enterprise only
        if is_management_entitled or is_provisioning_entitled:
            history["groups"] = token_server_groups(server_id, tokens_obj)
        else:
            # FIXME:  better messaging about minimum service level
            history["groups"] = [
                "Not subscribed to any system groups: not entitled for "
                "RHN Management or RHN Provisioning"
            ]

    if is_provisioning_entitled:
        history["packages"] = token_packages(server_id, tokens_obj)
        history["config_channels"] = token_config_channels(server, tokens_obj)
    else:
        history["packages"] = [
            "Insufficient service level for automatic package installation."
        ]
        history["config_channels"] = [
            "Insufficient service level for config channel subscription."
        ]

    # build the report and send it back
    return history_report(history)
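
For reference, the history mapping handed to history_report() is keyed by the sections filled in above. A purely illustrative example (channel and group names are made up) for a re-activated server without provisioning entitlement:

history = {
    "entitlement": "Re-activation: keeping previous entitlement level",
    "channels": ["example-base-channel"],
    "groups": ["Re-activation: keeping previous server groups"],
    "packages": ["Insufficient service level for automatic package installation."],
    "config_channels": ["Insufficient service level for config channel subscription."],
}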
Example #7
 def __init__(self):
     # We store the security list in the global flags. This way, we don't
     # have to worry about clearing it up.
     if rhnFlags.test(self._flag_string):
         self.sec = rhnFlags.get(self._flag_string)
     else:
         self.sec = []
         rhnFlags.set(self._flag_string, self.sec)
Example #8
 def __init__(self):
     # We store the security list in the global flags. This way, we don't
     # have to worry about clearing it up.
     if rhnFlags.test(self._flag_string):
         self.sec = rhnFlags.get(self._flag_string)
     else:
         self.sec = []
         rhnFlags.set(self._flag_string, self.sec)
Example #9
def process_token(server, server_arch, tokens_obj, virt_type=None):
    """ perform registration tasks for a server as indicated by a token """
    assert(isinstance(tokens_obj, ActivationTokens))
    server_id = server['id']
    log_debug(1, server_id, tokens_obj.get_names())

    # Keep track of what we're doing
    history = {}

    # the tokens are confirmed, mark this server as using it and make
    # sure we're within limits
    check_token_limits(server_id, tokens_obj)

    is_reactivation = rhnFlags.test('re_registration_token')

    if is_reactivation:
        # If it's a re-registration, the server is already entitled
        history["entitlement"] = "Re-activation: keeping previous entitlement level"
    else:
        tokens_obj.entitle(server_id, history, virt_type)


    # channels
    history["channels"] = token_channels(server, server_arch, tokens_obj)


    is_provisioning_entitled = None
    is_management_entitled = None

    if tokens_obj.has_entitlement_label('provisioning_entitled'):
        is_provisioning_entitled = 1

    if tokens_obj.has_entitlement_label('enterprise_entitled'):
        is_management_entitled = 1

    if is_reactivation:
        history["groups"] = ["Re-activation: keeping previous server groups"]
    else:
        # server groups - allowed for enterprise only
        if is_management_entitled or is_provisioning_entitled:
            history["groups"] = token_server_groups(server_id, tokens_obj)
        else:
            # FIXME:  better messaging about minimum service level
            history["groups"] = [
                "Not subscribed to any system groups: not entitled for "
                "RHN Management or RHN Provisioning"
            ]

    if is_provisioning_entitled:
        history["packages"] = token_packages(server_id, tokens_obj)
        history["config_channels"] = token_config_channels(server,
            tokens_obj)
    else:
        history["packages"] = [ "Insufficient service level for automatic package installation." ]
        history["config_channels"] = [ "Insufficient service level for config channel subscription." ]

    # build the report and send it back
    return history_report(history)
Example #10
    def response_file(req, response):
        """ send a file out """

        log_debug(1, response.name)
        # We may set the content type remotely
        if rhnFlags.test("Content-Type"):
            req.content_type = rhnFlags.get("Content-Type")
        else:
            # Safe default
            req.content_type = "application/octet-stream"

        # find out the size of the file
        if response.length == 0:
            response.file_obj.seek(0, 2)
            size = response.file_obj.tell()
            response.file_obj.seek(0, 0)
        else:
            size = response.length
        req.headers_out["Content-Length"] = str(size)

        # if we loaded this from a real fd, set it as the X-Replace-Content
        # check for "name" since sometimes we get xmlrpclib.File's that have
        # a stringIO as the file_obj, and they don't have a .name (i.e.,
        # fileLists...)
        if response.name:
            req.headers_out["X-Package-FileName"] = response.name

        # yum can request partial content (HTTP_PARTIAL_CONTENT) via byte ranges
        if req.headers_in.has_key('Range'):
            (start, end) = parse_byteranges(req.headers_in['Range'], size)
            req.headers_out["Accept-Ranges"] = "bytes"
            req.headers_out["Content-Range"] = get_content_range(
                start, end, size)
            size = end - start + 1
            response.file_obj.seek(start, 0)
            status = apache.HTTP_PARTIAL_CONTENT
        else:
            start = 0
            end = size
            status = apache.OK

        # send the headers
        req.send_http_header()
        # and the file
        buffer_size = CFG.BUFFER_SIZE
        while 1:
            if buffer_size > size:
                buffer_size = size
            buf = response.read(buffer_size)
            size = size - buffer_size
            if not buf:
                break
            try:
                req.write(buf)
            except IOError:
                return apache.HTTP_BAD_REQUEST
        response.close()
        return status
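
The Content-Range header built by get_content_range() follows the standard HTTP form ("bytes <start>-<end>/<total>"). A minimal sketch of that formatting, offered as an illustration rather than the project's actual helper:

def content_range(start, end, total):
    # e.g. content_range(0, 99, 1000) -> "bytes 0-99/1000"
    # (the first 100 bytes of a 1000-byte file)
    return "bytes %d-%d/%d" % (start, end, total)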
Example #11
    def response_file(req, response):
        """ send a file out """

        log_debug(1, response.name)
        # We may set the content type remotely
        if rhnFlags.test("Content-Type"):
            req.content_type = rhnFlags.get("Content-Type")
        else:
            # Safe default
            req.content_type = "application/octet-stream"

        # find out the size of the file
        if response.length == 0:
            response.file_obj.seek(0, 2)
            size = response.file_obj.tell()
            response.file_obj.seek(0, 0)
        else:
            size = response.length
        req.headers_out["Content-Length"] = str(size)

        # if we loaded this from a real fd, set it as the X-Replace-Content
        # check for "name" since sometimes we get xmlrpclib.File's that have
        # a stringIO as the file_obj, and they don't have a .name (i.e.,
        # fileLists...)
        if response.name:
            req.headers_out["X-Package-FileName"] = response.name

        # yum can request partial content (HTTP_PARTIAL_CONTENT) via byte ranges
        if req.headers_in.has_key('Range'):
            (start, end) = parse_byteranges(req.headers_in['Range'], size)
            req.headers_out["Accept-Ranges"] = "bytes"
            req.headers_out["Content-Range"] = get_content_range(start, end, size)
            size = end - start + 1
            response.file_obj.seek(start, 0)
            status = apache.HTTP_PARTIAL_CONTENT
        else:
            start = 0
            end = size
            status = apache.OK

        # send the headers
        req.send_http_header()
        # and the file
        buffer_size = CFG.BUFFER_SIZE
        while 1:
            if buffer_size > size:
                buffer_size = size
            buf = response.read(buffer_size)
            size = size - buffer_size
            if not buf:
                break
            try:
                req.write(buf)
            except IOError:
                return apache.HTTP_BAD_REQUEST
        response.close()
        return status
Example #12
def auth_client():
    """ Authenticates a request from a client
        For an unsigned request, this function returns 0 (request should be
        coming from a client).
    """

    log_debug(3)
    if not rhnFlags.test("AUTH_SESSION_TOKEN"):
        # No auth information; decline any GET action (XMLRPC requests
        # ignore this error).
        log_debug(4, "declined client authentication for GET requests")
        return 0

    token = dict((k.lower(),v) for k,v in rhnFlags.get("AUTH_SESSION_TOKEN").items())
    # Check to see if everything we need to compute the signature is there
    for k in ('x-rhn-server-id',
              'x-rhn-auth-user-id',
              'x-rhn-auth',
              'x-rhn-auth-server-time',
              'x-rhn-auth-expire-offset'):
        if k not in token:
            # No auth information; decline any action
            log_debug(4, "Declined auth of client for GET requests; "
                         "incomplete header info.")
            return 0

    clientId = token['x-rhn-server-id']
    username = token['x-rhn-auth-user-id']
    signature = token['x-rhn-auth']
    rhnServerTime = token['x-rhn-auth-server-time']
    expireOffset = token['x-rhn-auth-expire-offset']


    computed = computeSignature(CFG.SECRET_KEY, clientId, username,
                                rhnServerTime, expireOffset)
    if computed != signature:
        log_debug(4, "Sent client signature %s does not match ours %s." % (
            signature, computed))
        raise rhnFault(33, "Invalid client session key")

    # Convert the expiration/time to floats:
    rhnServerTime = float(rhnServerTime)
    expireOffset = float(expireOffset)

    if rhnServerTime + expireOffset < time.time():
        log_debug(4, "Expired client authentication token")
        raise rhnFault(34, "Expired client authentication token")

    log_debug(4, "Client auth OK")
    return 1
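
The expiry test at the end reduces to comparing the server-issued timestamp plus the allowed offset against the current time. A small self-contained sketch of that check:

import time

def token_expired(server_time, expire_offset):
    # Both values arrive as strings in the session token headers
    # (x-rhn-auth-server-time and x-rhn-auth-expire-offset above).
    return float(server_time) + float(expire_offset) < time.time()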
Example #13
 def __getSessionToken():
     """ Get/test-for session token in headers (rhnFlags) """
     log_debug(1)
     if not rhnFlags.test("AUTH_SESSION_TOKEN"):
         raise rhnFault(33, "Missing session token")
     return rhnFlags.get("AUTH_SESSION_TOKEN")
Example #14
    def response_file(self, response):
        log_debug(3, response.name)
        # We may set the content type remotely
        if rhnFlags.test("Content-Type"):
            self.req.content_type = rhnFlags.get("Content-Type")
        else:
            # Safe default
            self.req.content_type = "application/octet-stream"

        # find out the size of the file
        if response.length == 0:
            response.file_obj.seek(0,2)
            file_size = response.file_obj.tell()
            response.file_obj.seek(0,0)
        else:
            file_size = response.length

        success_response = apache.OK
        response_size = file_size

        # Serve up the requested byte range
        if self.req.headers_in.has_key("Range"):
            try:
                range_start, range_end = \
                    byterange.parse_byteranges(self.req.headers_in["Range"],
                        file_size)
                response_size = range_end - range_start
                self.req.headers_out["Content-Range"] = \
                    byterange.get_content_range(range_start, range_end, file_size)
                self.req.headers_out["Accept-Ranges"] = "bytes"

                response.file_obj.seek(range_start)

                # We'll want to send back a partial content rather than ok
                # if this works
                self.req.status = apache.HTTP_PARTIAL_CONTENT
                success_response = apache.HTTP_PARTIAL_CONTENT

            # For now we will just return the whole file on the following exceptions
            except byterange.InvalidByteRangeException:
                pass
            except byterange.UnsatisfyableByteRangeException:
                pass
        self.req.headers_out["Content-Length"] = str(response_size)

        # if we loaded this from a real fd, set it as the X-Replace-Content
        # check for "name" since sometimes we get xmlrpclib.transports.File's that have
        # a stringIO as the file_obj, and they don't have a .name (i.e.,
        # fileLists...)
        if response.name:
            self.req.headers_out["X-Package-FileName"] = response.name

        xrepcon = self.req.headers_in.has_key("X-Replace-Content-Active") \
                  and rhnFlags.test("Download-Accelerator-Path")
        if xrepcon:
            fpath = rhnFlags.get("Download-Accelerator-Path")
            log_debug(1, "Serving file %s" % fpath)
            self.req.headers_out["X-Replace-Content"] = fpath
            # Only set a byte rate if xrepcon is active
            byte_rate = rhnFlags.get("QOS-Max-Bandwidth")
            if byte_rate:
                self.req.headers_out["X-Replace-Content-Throttle"] = str(byte_rate)

        # send the headers
        self.req.send_http_header()

        if self.req.headers_in.has_key("Range"):
	    # and the file
	    read = 0
	    while read < response_size:
		# We check the size here in case we're not asked for the entire file.
		if (read + CFG.BUFFER_SIZE > response_size):
		    to_read = read + CFG.BUFFER_SIZE - response_size
		else:
		    to_read = CFG.BUFFER_SIZE
		buf = response.read(CFG.BUFFER_SIZE)
		if not buf:
		    break
		try:
		    self.req.write(buf)
		    read = read + CFG.BUFFER_SIZE
		except IOError:
		    if xrepcon:
			# We're talking to a proxy, so don't bother to report
			# a SIGPIPE
			break
		    return apache.HTTP_BAD_REQUEST
	    response.close()
        else:
            if 'wsgi.file_wrapper' in self.req.headers_in:
		self.req.output = self.req.headers_in['wsgi.file_wrapper'](response, CFG.BUFFER_SIZE)
	    else:
		self.req.output = iter(lambda: response.read(CFG.BUFFER_SIZE), '')

        return success_response
Example #15
class apacheHandler(rhnApache):
    """ Main apache entry point for the proxy. """
    _lang_catalog = "proxy"

    def __init__(self):
        rhnApache.__init__(self)
        self.input = None
        self._component = None

    def set_component(self, component):
        self._component = component

    @staticmethod
    def _setSessionToken(headers):
        # extended to always return a token, even if an empty one
        ret = rhnApache._setSessionToken(headers)
        if ret:
            log_debug(4, "Returning", ret)
            return ret

        # Session token did not verify, we have an empty auth token
        token = UserDictCase()
        rhnFlags.set("AUTH_SESSION_TOKEN", token)
        return token

    def headerParserHandler(self, req):
        """ Name-munging if request came from anaconda in response to a
            kickstart. """
        ret = rhnApache.headerParserHandler(self, req)
        if ret != apache.OK:
            return ret

        self.input = rpclib.transports.Input(req.headers_in)

        # Before we allow the main handler code to commence, we'll first check
        # to see if this request came from anaconda in response to a kickstart.
        # If so, we'll need to do some special name-munging before we continue.

        ret = self._transformKickstartRequest(req)
        return ret

    def _transformKickstartRequest(self, req):
        """ If necessary, this routine will transform a "tinified" anaconda-
            generated kickstart request into a normalized form capable of being
            cached effectively by squid.

            This is done by first making a HEAD request
            to the satellite for the purpose of updating the kickstart progress and
            retrieving an MD5 sum for the requested file.  We then replace the
            tinyURL part of the URI with the retrieved MD5 sum.  This effectively
            removes session-specific information while allowing us to still cache
            based on the uniqueness of the file.
        """
        # Kickstart requests only come in the form of a GET, so short-circuit
        # if that is not the case.

        if (req.method != "GET"):
            return apache.OK

        log_debug(6, "URI", req.uri)
        log_debug(6, "COMPONENT", self._component)

        # If we're a broker, we know that this is a kickstart request from
        # anaconda by checking if the URI begins with /ty/*, otherwise just
        # return.  If we're an SSL redirect, we check that the URI begins with
        # /ty-cksm/*, otherwise return.

        if self._component == COMPONENT_BROKER:
            if req.uri.startswith(URI_PREFIX_KS):
                log_debug(3, "Found a kickstart URI: %s" % req.uri)
                return self._transformKsRequestForBroker(req)
        elif self._component == COMPONENT_REDIRECT:
            if req.uri.startswith(URI_PREFIX_KS_CHECKSUM):
                log_debug(3, "Found a kickstart checksum URI: %s" % req.uri)
                return self._transformKsRequestForRedirect(req)

        return apache.OK

    def _transformKsRequestForBroker(self, req):

        # Get the checksum for the requested resource from the satellite.

        (status, checksum) = self._querySatelliteForChecksum(req)
        if status != apache.OK or not checksum:
            return status

        # If we got this far, we have the checksum.  Create a new URI based on
        # the checksum.

        newURI = self._generateCacheableKickstartURI(req.uri, checksum)
        if not newURI:
            # Couldn't create a cacheable URI, log an error and revert to
            # BZ 158236 behavior.

            log_error('Could not create cacheable ks URI from "%s"' % req.uri)
            return apache.OK

        # Now we must embed the old URI into a header in the original request
        # so that the SSL Redirect has it available if the resource has not
        # been cached yet.  We will also embed a header that holds the new URI,
        # so that the content handler can use it later.

        log_debug(3, "Generated new kickstart URI: %s" % newURI)
        req.headers_in[HEADER_ACTUAL_URI] = req.uri
        req.headers_in[HEADER_EFFECTIVE_URI] = newURI

        return apache.OK

    @staticmethod
    def _transformKsRequestForRedirect(req):

        # If we don't get the actual URI in the headers, we'll decline the
        # request.

        if not req.headers_in or not req.headers_in.has_key(HEADER_ACTUAL_URI):
            log_error("Kickstart request header did not include '%s'" %
                      HEADER_ACTUAL_URI)
            return apache.DECLINED

        # The original URI is embedded in the headers under X-RHN-ActualURI.
        # Remove it, and place it in the X-RHN-EffectiveURI header.

        req.headers_in[HEADER_EFFECTIVE_URI] = req.headers_in[
            HEADER_ACTUAL_URI]
        log_debug(
            3, "Reverting to old URI: %s" % req.headers_in[HEADER_ACTUAL_URI])

        return apache.OK

    def _querySatelliteForChecksum(self, req):
        """ Sends a HEAD request to the satellite for the purpose of obtaining
            the checksum for the requested resource.  A (status, checksum)
            tuple is returned.  If status is not apache.OK, checksum will be
            None.  If status is OK, and a checksum is not returned, the old
            BZ 158236 behavior will be used.
        """
        scheme = SCHEME_HTTP
        if req.server.port == 443:
            scheme = SCHEME_HTTPS
        log_debug(6, "Using scheme: %s" % scheme)

        # Initiate a HEAD request to the satellite to retrieve the MD5 sum.
        # Actually, we make the request through our own proxy first, so
        # that we don't accidentally bypass necessary authentication
        # routines.  Since it's a HEAD request, the proxy will forward it
        # directly to the satellite like it would a POST request.

        host = "127.0.0.1"
        port = req.connection.local_addr[1]

        connection = self._createConnection(host, port, scheme)
        if not connection:
            # Couldn't form the connection.  Log an error and revert to the
            # old BZ 158236 behavior.  In order to be as robust as possible,
            # we won't fail here.

            log_error('HEAD req - Could not create connection to %s://%s:%s' %
                      (scheme, host, str(port)))
            return (apache.OK, None)

        # We obtained the connection successfully.  Construct the URL that
        # we'll connect to.

        pingURL = "%s://%s:%s%s" % (scheme, host, str(port), req.uri)
        log_debug(6, "Ping URI: %s" % pingURL)

        hdrs = UserDictCase()
        for k in req.headers_in.keys():
            if k.lower() != 'range':  # we want checksum of whole file
                hdrs[k] = req.headers_in[k]

        log_debug(9, "Using existing headers_in", hdrs)
        connection.request("HEAD", pingURL, None, hdrs)
        log_debug(6, "Connection made, awaiting response.")

        # Get the response.

        response = connection.getresponse()
        log_debug(6, "Received response status: %s" % response.status)
        connection.close()

        if (response.status != apache.HTTP_OK) and (
                response.status != apache.HTTP_PARTIAL_CONTENT):
            # Something bad happened.  Return the status back to the client.

            log_debug(
                1, "HEAD req - Received error code in response: %s" %
                (str(response.status)))
            return (response.status, None)

        # The request was successful.  Dig the MD5 checksum out of the headers.

        responseHdrs = response.msg
        if not responseHdrs:
            # No headers?!  This shouldn't happen at all.  But if it does,
            # revert to the old BZ 158236 behavior.

            log_error("HEAD response - No HTTP headers!")
            return (apache.OK, None)

        if not responseHdrs.has_key(HEADER_CHECKSUM):
            # No checksum was provided.  This could happen if a newer
            # proxy is talking to an older satellite.  To keep things
            # running smoothly, we'll just revert to the BZ 158236
            # behavior.

            log_debug(1, "HEAD response - No X-RHN-Checksum field provided!")
            return (apache.OK, None)

        checksum = responseHdrs[HEADER_CHECKSUM]

        return (apache.OK, checksum)

    @staticmethod
    def _generateCacheableKickstartURI(oldURI, checksum):
        """
        This routine computes a new cacheable URI based on the old URI and the
        checksum. For example, if the checksum is 1234ABCD and the oldURI was:

            /ty/AljAmCEt/RedHat/base/comps.xml

        Then, the new URI will be:

            /ty-cksm/1234ABCD/RedHat/base/comps.xml

        If for some reason the new URI could not be generated, return None.
        """

        newURI = URI_PREFIX_KS_CHECKSUM + checksum

        # Strip the first two path pieces off of the oldURI.

        uriParts = oldURI.split('/')
        numParts = 0
        for part in uriParts:
            if len(part) != 0:  # Account for double slashes ("//")
                numParts += 1
                if numParts > 2:
                    newURI += "/" + part

        # If the URI didn't have enough parts, return None.

        if numParts <= 2:
            newURI = None

        return newURI

    @staticmethod
    def _createConnection(host, port, scheme):
        params = {'host': host, 'port': port}

        if CFG.has_key('timeout'):
            params['timeout'] = CFG.TIMEOUT

        if scheme == SCHEME_HTTPS:
            conn_class = connections.HTTPSConnection
        else:
            conn_class = connections.HTTPConnection

        return conn_class(**params)

    def handler(self, req):
        """ Main handler to handle all requests pumped through this server. """

        ret = rhnApache.handler(self, req)
        if ret != apache.OK:
            return ret

        log_debug(4, "METHOD", req.method)
        log_debug(4, "PATH_INFO", req.path_info)
        log_debug(4, "URI (full path info)", req.uri)
        log_debug(4, "Component", self._component)

        if self._component == COMPONENT_BROKER:
            from broker import rhnBroker
            handlerObj = rhnBroker.BrokerHandler(req)
        else:
            # Redirect
            from redirect import rhnRedirect
            handlerObj = rhnRedirect.RedirectHandler(req)

        try:
            ret = handlerObj.handler()
        except rhnFault, e:
            return self.response(req, e)

        if rhnFlags.test("NeedEncoding"):
            return self.response(req, ret)

        # All good; we expect ret to be an HTTP return code
        if not isinstance(ret, type(1)):
            raise rhnException("Invalid status code type %s" % type(ret))
        log_debug(1, "Leaving with status code %s" % ret)
        return ret
Example #16
    def response(self, req, response):
        """ send the response (common code) """

        # Send the xml-rpc response back
        log_debug(5, "Response type", type(response))

        needs_xmlrpc_encoding = rhnFlags.test("NeedEncoding")
        compress_response = rhnFlags.test("compress_response")
        # Init an output object; we'll use it for sending data in various
        # formats
        if isinstance(response, rpclib.transports.File):
            if not hasattr(response.file_obj, 'fileno') and compress_response:
                # This is a StringIO that has to be compressed, so read it in
                # memory; mark that we don't have to do any xmlrpc encoding
                response = response.file_obj.read()
                needs_xmlrpc_encoding = 0
            else:
                # Just treat it as a file
                return self.response_file(req, response)

        is_fault = 0
        if isinstance(response, rhnFault):
            if req.method == 'GET':
                return self._response_fault_get(req, response.getxml())
            # Need to encode the response as xmlrpc
            response = response.getxml()
            is_fault = 1
            # No compression
            compress_response = 0
            # This is an xmlrpc Fault, so we have to encode it
            needs_xmlrpc_encoding = 1

        output = rpclib.transports.Output()

        if not is_fault:
            # First, use the same encoding/transfer that the client used
            output.set_transport_flags(
                transfer=rpclib.transports.lookupTransfer(self.input.transfer),
                encoding=rpclib.transports.lookupEncoding(self.input.encoding))

        if compress_response:
            # check if we have to compress this result
            log_debug(4, "Compression on for client version",
                      self.clientVersion)
            if self.clientVersion > 0:
                output.set_transport_flags(output.TRANSFER_BINARY,
                                           output.ENCODE_ZLIB)
            else:  # original clients had the binary transport support broken
                output.set_transport_flags(output.TRANSFER_BASE64,
                                           output.ENCODE_ZLIB)

        # We simply add the transport options to the output headers
        output.headers.update(rhnFlags.get('outputTransportOptions').dict())

        if needs_xmlrpc_encoding:
            # Normalize the response
            response = self.normalize(response)
            try:
                response = rpclib.xmlrpclib.dumps(response, methodresponse=1)
            except TypeError, e:
                log_debug(
                    -1, "Error \"%s\" encoding response = %s" % (e, response))
                Traceback("apacheHandler.response",
                          req,
                          extra="Error \"%s\" encoding response = %s" %
                          (e, response),
                          severity="notification")
                return apache.HTTP_INTERNAL_SERVER_ERROR
            except Exception:  # pylint: disable=E0012, W0703
                # Uncaught exception; signal the error
                Traceback("apacheHandler.response", req, severity="unhandled")
                return apache.HTTP_INTERNAL_SERVER_ERROR
Example #17
    def response_file(req, response):
        """ send a file out """
        log_debug(3, response.name)
        # We may set the content type remotely
        if rhnFlags.test("Content-Type"):
            req.content_type = rhnFlags.get("Content-Type")
        else:
            # Safe default
            req.content_type = "application/octet-stream"

        # find out the size of the file
        if response.length == 0:
            response.file_obj.seek(0, 2)
            file_size = response.file_obj.tell()
            response.file_obj.seek(0, 0)
        else:
            file_size = response.length

        success_response = apache.OK
        response_size = file_size

        # Serve up the requested byte range
        if req.headers_in.has_key("Range"):
            try:
                range_start, range_end = \
                    byterange.parse_byteranges(req.headers_in["Range"],
                                               file_size)
                response_size = range_end - range_start
                req.headers_out["Content-Range"] = \
                    byterange.get_content_range(range_start, range_end, file_size)
                req.headers_out["Accept-Ranges"] = "bytes"

                response.file_obj.seek(range_start)

                # We'll want to send back a partial content rather than ok
                # if this works
                req.status = apache.HTTP_PARTIAL_CONTENT
                success_response = apache.HTTP_PARTIAL_CONTENT

            # For now we will just return the whole file on the following exceptions
            except byterange.InvalidByteRangeException:
                pass
            except byterange.UnsatisfyableByteRangeException:
                pass

        req.headers_out["Content-Length"] = str(response_size)

        # if we loaded this from a real fd, set it as the X-Replace-Content
        # check for "name" since sometimes we get xmlrpclib.transports.File's that have
        # a stringIO as the file_obj, and they don't have a .name (i.e.,
        # fileLists...)
        if response.name:
            req.headers_out["X-Package-FileName"] = response.name

        xrepcon = req.headers_in.has_key("X-Replace-Content-Active") \
            and rhnFlags.test("Download-Accelerator-Path")
        if xrepcon:
            fpath = rhnFlags.get("Download-Accelerator-Path")
            log_debug(1, "Serving file %s" % fpath)
            req.headers_out["X-Replace-Content"] = fpath
            # Only set a byte rate if xrepcon is active
            byte_rate = rhnFlags.get("QOS-Max-Bandwidth")
            if byte_rate:
                req.headers_out["X-Replace-Content-Throttle"] = str(byte_rate)

        # send the headers
        req.send_http_header()

        if req.headers_in.has_key("Range"):
            # and the file
            read = 0
            while read < response_size:
                # Cap the read size in case we're not asked for the entire file.
                to_read = min(CFG.BUFFER_SIZE, response_size - read)
                buf = response.read(to_read)
                if not buf:
                    break
                try:
                    req.write(buf)
                    read = read + len(buf)
                except IOError:
                    if xrepcon:
                        # We're talking to a proxy, so don't bother to report
                        # a SIGPIPE
                        break
                    return apache.HTTP_BAD_REQUEST
            response.close()
        else:
            if 'wsgi.file_wrapper' in req.headers_in:
                req.output = req.headers_in['wsgi.file_wrapper'](
                    response, CFG.BUFFER_SIZE)
            else:
                req.output = iter(lambda: response.read(CFG.BUFFER_SIZE), '')
        return success_response
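
The final else branch hands WSGI an iterator when the server does not expose wsgi.file_wrapper. A minimal sketch of that chunked-read fallback on a plain file-like object:

def chunked_reader(fileobj, block_size):
    # iter() with a '' sentinel keeps calling read() until EOF,
    # yielding the file in block_size pieces (the idiom used above).
    return iter(lambda: fileobj.read(block_size), '')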
Example #18
    def create_system(self, user, profile_name, release_version,
                                  architecture, data):
        """
        Create a system based on the input parameters.

        Return dict containing a server object for now.
        Called by new_system (< rhel5)
              and new_system_user_pass | new_system_activation_key (>= rhel5)
        """

        if profile_name is not None and not \
           rhnFlags.test("re_registration_token") and \
           len(profile_name) < 1:
            raise rhnFault(800)

        # log entry point
        if data.has_key("token"):
            log_item = "token = '%s'" % data["token"]
        else:
            log_item = "username = '******'" % user.username

        log_debug(1, log_item, release_version, architecture)

        # Fetch the applet's UUID
        if data.has_key("uuid"):
            applet_uuid = data['uuid']
            log_debug(3, "applet uuid", applet_uuid)
        else:
            applet_uuid = None

        # Fetch the up2date UUID
        if data.has_key("rhnuuid"):
            up2date_uuid = data['rhnuuid']
            log_debug(3, "up2date uuid", up2date_uuid)
            # XXX Should somehow check the uuid uniqueness
            #raise rhnFault(105, "A system cannot be registered multiple times")
        else:
            up2date_uuid = None

        release = str(release_version)

        if data.has_key('token'):
            token_string = data['token']
            # Look the token up; if the token does not exist or is invalid,
            # stop right here (search_token raises the appropriate rhnFault)
            tokens_obj = rhnServer.search_token(token_string)
            log_user_id = tokens_obj.get_user_id()
        else:
            # user should not be null here
            log_user_id = user.getid()
            tokens_obj = rhnServer.search_org_token(user.contact["org_id"])
            log_debug(3,"universal_registration_token set as %s" %
                        str(tokens_obj.get_tokens()))
            rhnFlags.set("universal_registration_token", tokens_obj)

        if data.has_key('channel') and len(data['channel']) > 0:
            channel = data['channel']
            log_debug(3, "requested EUS channel: %s" % str(channel))
        else:
            channel = None

        newserv = None
        if tokens_obj:
            # Only set registration_token if we have token(s) available.
            # server_token.ActivationTokens.__nonzero__ should do the right
            # thing of filtering the case of no tokens
            rhnFlags.set("registration_token", tokens_obj)
            # Is the token associated with a server?
            if tokens_obj.is_rereg_token:
                # Also flag it's a re-registration token
                rhnFlags.set("re_registration_token", tokens_obj)
                # Load the server object
                newserv = rhnServer.search(tokens_obj.get_server_id())
                newserv.disable_token()
                # The old hardware info no longer applies
                newserv.delete_hardware()
                # Update the arch - it may have changed; we know the field was
                # provided for us
                newserv.set_arch(architecture)
                newserv.user = rhnUser.User("", "")
                newserv.user.reload(newserv.server['creator_id'])
                # Generate a new secret for this server
                newserv.gen_secret()
                # Get rid of the old package profile - it's bogus in this case
                newserv.dispose_packages()
                # The new server may have a different base channel
                newserv.change_base_channel(release)

        if newserv is None:
            # Not a re-registration token, we need a fresh server object
            rhnSQL.set_log_auth(log_user_id)
            newserv = rhnServer.Server(user, architecture)


        # Proceed with using the rest of the data
        newserv.server["release"] = release
        if data.has_key('release_name'):
            newserv.server["os"] = data['release_name']

        ## add the package list
        if data.has_key('packages'):
            for package in data['packages']:
                newserv.add_package(package)
        # add the hardware profile
        if data.has_key('hardware_profile'):
            for hw in data['hardware_profile']:
                newserv.add_hardware(hw)
        # fill in the other details from the data dictionary
        if profile_name is not None and not \
           rhnFlags.test("re_registration_token"):
            newserv.server["name"] = profile_name[:128]
        if data.has_key("os"):
            newserv.server["os"] = data["os"][:64]
        if data.has_key("description"):
            newserv.server["description"] = data["description"][:256]
        else:
            newserv.default_description()

        # Check for virt params
        # Get the uuid, if there is one.
        if data.has_key('virt_uuid'):
            virt_uuid = data['virt_uuid']
            if virt_uuid is not None \
               and not rhnVirtualization.is_host_uuid(virt_uuid):
                # If we don't have a virt_type key, we'll assume PARA.
                virt_type = None
                if data.has_key('virt_type'):
                    virt_type = data['virt_type']
                    if virt_type == 'para':
                        virt_type = rhnVirtualization.VirtualizationType.PARA
                    elif virt_type == 'fully':
                        virt_type = rhnVirtualization.VirtualizationType.FULLY
                    else:
                        raise Exception(
                            "Unknown virtualization type: %s" % virt_type)
                else:
                    raise Exception("Virtualization type not provided")
                newserv.virt_uuid = virt_uuid
                newserv.virt_type = virt_type
            else:
                newserv.virt_uuid = None
                newserv.virt_type = None
        else:
            newserv.virt_uuid = None
            newserv.virt_type = None


        # If we didn't find virt info from xen, check smbios
        if data.has_key('smbios') and newserv.virt_uuid is None:
            (newserv.virt_type, newserv.virt_uuid) = \
                    parse_smbios(data['smbios'])

        if tokens_obj.forget_rereg_token:
            # At this point we retained the server with re-activation;
            # let the stacked activation keys do their magic
            tokens_obj.is_rereg_token = 0
            rhnFlags.set("re_registration_token", 0)

        # now if we have a token, load the extra registration
        # information from the token
        if rhnFlags.test("registration_token"):
            # Keep the original "info" field
            newserv.load_token()
            # we need to flush the registration information into the
            # database so we can proceed with processing the rest of
            # the token information (like subscribing the server to
            # groups, channels, etc)

            # bretm 02/19/2007 -- this shouldn't throw any of the following:
            #   SubscriptionCountExceeded
            #   BaseChannelDeniedError
            #   NoBaseChannelError
            # since we have the token object, and underneath the hood, we have none_ok=have_token

            # BUT - it does. So catch them and throw. this will make rhnreg_ks
            # die out, but oh well. at least they don't end up registered, and
            # without a base channel.
            try:
                # don't commit
                newserv.save(0, channel)
            except (rhnChannel.SubscriptionCountExceeded,
                    rhnChannel.NoBaseChannelError), channel_error:
                raise rhnFault(70), None, sys.exc_info()[2]
            except rhnChannel.BaseChannelDeniedError, channel_error:
                raise rhnFault(71), None, sys.exc_info()[2]
Example #19
def store_client_route(server_id):
    """ Stores the route the client took to get to hosted or the Satellite """

    log_debug(5, server_id)

    # get the old routing information for this server_id
    # oldRoute in this format: [(id0, hostname0),  (id1, hostname1),  ...]
    #                           closest to client, ..., closest to server
    h = rhnSQL.prepare("""
        select position,
               proxy_server_id,
               hostname
          from rhnServerPath
         where server_id = :server_id
        order by position
        """)
    h.execute(server_id=server_id)
    oldRoute = h.fetchall_dict() or []
    newRoute = []

    # code block if there *is* routing info in the headers
    # NOTE: X-RHN-Proxy-Auth described in proxy/broker/rhnProxyAuth.py
    if rhnFlags.test('X-RHN-Proxy-Auth'):
        tokens = string.split(rhnFlags.get('X-RHN-Proxy-Auth'), ',')
        tokens = filter(lambda token: token, tokens)

        log_debug(4, "route tokens", tokens)
        # newRoute in this format: [(id0, hostname0),  (id1, hostname1),  ...]
        #                           closest to client, ..., closest to server
        for token in tokens:
            token, hostname = apacheAuth.splitProxyAuthToken(token)
            if hostname is None:
                log_debug(
                    3,
                    "NOTE: Spacewalk Proxy v1.1 detected - route tracking is unsupported"
                )
                newRoute = []
                break
            newRoute.append((token[0], hostname))

        log_debug(4, "newRoute", newRoute)

    if oldRoute == newRoute:
        # Nothing to do here
        # This also catches the case of no routes at all
        return

    if oldRoute:
        # blow away table rhnServerPath entries for server_id
        log_debug(8, 'blow away route-info for %s' % server_id)
        h = rhnSQL.prepare("""
            delete from rhnServerPath where server_id = :server_id
        """)
        h.execute(server_id=server_id)

    if not newRoute:
        log_debug(3, "No new route to add")
        rhnSQL.commit()
        return

    log_debug(8, 'adding route-info entries: %s - %s' % (server_id, newRoute))

    h = rhnSQL.prepare("""
        insert into rhnServerPath
               (server_id, proxy_server_id, position, hostname)
        values (:server_id, :proxy_server_id, :position, :hostname)
    """)
    server_ids = []
    proxy_ids = []
    proxy_hostnames = []
    positions = []
    counter = 0
    for p in newRoute:
        proxy_id, proxy_hostname = p[:2]
        proxy_ids.append(proxy_id)
        proxy_hostnames.append(proxy_hostname)
        server_ids.append(server_id)
        positions.append(counter)
        counter = counter + 1

    log_debug(5, server_ids, proxy_ids, positions, proxy_hostnames)
    h.executemany(server_id=server_ids,
                  proxy_server_id=proxy_ids,
                  position=positions,
                  hostname=proxy_hostnames)

    rhnSQL.commit()
Example #20
    def response(self, response):
        # Send the xml-rpc response back
        log_debug(3, type(response))
        needs_xmlrpc_encoding = not rhnFlags.test("XMLRPC-Encoded-Response")
        compress_response = rhnFlags.test("compress_response")
        # Init an output object; we'll use it for sending data in various
        # formats
        if isinstance(response, transports.File):
            if not hasattr(response.file_obj, 'fileno') and compress_response:
                # This is a StringIO that has to be compressed, so read it in
                # memory; mark that we don't have to do any xmlrpc encoding
                response = response.file_obj.read()
                needs_xmlrpc_encoding = 0
            else:
                # Just treat it as a file
                return self.response_file(response)

        output = transports.Output()

        # First, use the same encoding/transfer that the client used
        output.set_transport_flags(
            transfer=transports.lookupTransfer(self.input.transfer),
            encoding=transports.lookupEncoding(self.input.encoding))

        if isinstance(response, xmlrpclib.Fault):
            log_debug(4, "Return FAULT",
                      response.faultCode, response.faultString)
            # No compression for faults because we'd like them to pop
            # up in clear text on the other side just in case
            output.set_transport_flags(output.TRANSFER_NONE, output.ENCODE_NONE)
        elif compress_response:
            # check if we have to compress this result
            log_debug(4, "Compression on for client version", self.client)
            if self.client > 0:
                output.set_transport_flags(output.TRANSFER_BINARY,
                                           output.ENCODE_ZLIB)
            else:  # original clients had the binary transport support broken
                output.set_transport_flags(output.TRANSFER_BASE64,
                                           output.ENCODE_ZLIB)

        # We simply add the transport options to the output headers
        output.headers.update(rhnFlags.get('outputTransportOptions').dict())

        if needs_xmlrpc_encoding:
            # Normalize the response
            response = self.normalize(response)
            try:
                response = xmlrpclib.dumps(response, methodresponse=1)
            except TypeError:
                e = sys.exc_info()[1]
                log_debug(4, "Error \"%s\" encoding response = %s" % (e, response))
                Traceback("apacheHandler.response", self.req,
                          extra="Error \"%s\" encoding response = %s" % (e, response),
                          severity="notification")
                return apache.HTTP_INTERNAL_SERVER_ERROR
            except:
                # Uncaught exception; signal the error
                Traceback("apacheHandler.response", self.req,
                          severity="unhandled")
                return apache.HTTP_INTERNAL_SERVER_ERROR

        # we're about done here, patch up the headers
        output.process(response)
        # Copy the rest of the fields
        for k, v in output.headers.items():
            if string.lower(k) == 'content-type':
                # Content-type
                self.req.content_type = v
            else:
                setHeaderValue(self.req.headers_out, k, v)

        if 5 <= CFG.DEBUG < 10:
            log_debug(5, "The response: %s[...SNIP (for sanity) SNIP...]%s" % (response[:100], response[-100:]))
        elif CFG.DEBUG >= 10:
            # if you absolutely must have that whole response in the log file
            log_debug(10, "The response: %s" % response)

        # send the headers
        self.req.send_http_header()
        try:
            # XXX: in case data is really large maybe we should split
            # it in smaller chunks instead of blasting everything at
            # once. Not yet a problem...
            self.req.write(output.data)
        except IOError:
            # send_http_header is already sent, so it doesn't make a lot of
            # sense to return a non-200 error; but there is no better solution
            return apache.HTTP_BAD_REQUEST
        del output
        return apache.OK
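To summarize the branching above: faults always go back uncompressed and unencoded so they stay readable on the client side, while compressible payloads pick a transfer mode based on the client version. Below is a minimal, self-contained sketch of that decision; pick_transport_flags is a hypothetical helper, and the plain strings stand in for the transports.Output TRANSFER_*/ENCODE_* constants (illustration only, not the real API).

def pick_transport_flags(is_fault, compress_response, client_version):
    """Mirror the flag selection in response() above; returns (transfer, encoding)."""
    if is_fault:
        # Faults travel in clear text so they pop up readable on the other side.
        return ("TRANSFER_NONE", "ENCODE_NONE")
    if compress_response:
        if client_version > 0:
            # Modern clients handle binary transfer of zlib-compressed data.
            return ("TRANSFER_BINARY", "ENCODE_ZLIB")
        # The original clients had broken binary transport support.
        return ("TRANSFER_BASE64", "ENCODE_ZLIB")
    # Otherwise the flags simply mirror whatever the client used on the request.
    return (None, None)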
Example no. 21
    def __init__(self, hw=None, guest=None):
        log_debug(4, hw, guest)
        if not hw or "identifier" not in hw or not guest:
            # incomplete data
            log_debug(1, "incomplete data")
            return
        host = rhnSQL.Row("rhnServer", "digital_server_id")
        host.load(hw['identifier'])
        hid = host.get('id')
        guest.user = rhnUser.User("", "")
        guest.user.reload(guest.server['creator_id'])
        guestid = guest.getid()
        if not hid:
            # create a new host entry
            host = server_class.Server(guest.user, hw.get('arch'))
            host.server["name"] = hw.get('name')
            host.server["os"] = hw.get('os')
            host.server["release"] = hw.get('type')
            host.server["last_boot"] = time.time()
            host.default_description()
            host.virt_type = rhnVirtualization.VirtualizationType.FULLY
            host.virt_uuid = None
            fake_token = False
            if not rhnFlags.test("registration_token"):
                # we need to fake it
                rhnFlags.set("registration_token", 'fake')
                fake_token = True
            host.save(1, None)
            host.server["digital_server_id"] = hw['identifier']
            entitle_server = rhnSQL.Procedure(
                "rhn_entitlements.entitle_server")
            entitle_server(host.getid(), 'foreign_entitled')
            host.save(1, None)
            if fake_token:
                rhnFlags.set("registration_token", None)

            hid = host.getid()
            host.reload(hid)
            log_debug(4, "New host created: ", host)
        else:
            host = server_class.Server(None)
            host.reload(hid)
            host.checkin(commit=0)
            log_debug(4, "Found host: ", host)
        host.reload_hardware()
        hostcpu = host.hardware_by_class(CPUDevice)
        if hostcpu and len(hostcpu) > 0:
            hostcpu = hostcpu[0].data
        else:
            hostcpu = None
        if not hostcpu or str(hostcpu.get('nrsocket')) != hw.get('total_ifls'):
            # update only if the number has changed
            log_debug(1, "update host cpu:", hw.get('total_ifls'))
            cpu = {
                'class': 'CPU',
                'desc': 'Processor',
                'count': hw.get('total_ifls'),
                'model_ver': '',
                'speed': '0',
                'cache': '',
                'model_number': '',
                'bogomips': '',
                'socket_count': hw.get('total_ifls'),
                'platform': hw.get('arch'),
                'other': '',
                'model_rev': '',
                'model': hw.get('arch'),
                'type': hw.get('type')
            }
            host.delete_hardware()
            host.add_hardware(cpu)
            host.save_hardware()

        h = rhnSQL.prepare("""
            select host_system_id
              from rhnVirtualInstance
             where virtual_system_id = :guestid""")
        h.execute(guestid=guestid)
        row = h.fetchone_dict()
        if not row or not row['host_system_id']:
            self._insert_virtual_instance(hid, None, fakeuuid=False)
            self._insert_virtual_instance(hid, guestid, fakeuuid=True)
        elif row['host_system_id'] != hid:
            log_debug(4, "update_virtual_instance", hid, guestid)
            q_update = rhnSQL.prepare("""
                UPDATE rhnVirtualInstance
                   SET host_system_id = :host_id
                 WHERE virtual_system_id = :guest_id
                   AND host_system_id = :old_host_id
            """)
            q_update.execute(host_id=hid,
                             guest_id=guestid,
                             old_host_id=row['host_system_id'])
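For context, the hw argument above is a flat dictionary describing a foreign virtualization host, and guest is an already-registered server object. A hypothetical payload covering exactly the keys the constructor reads; every value here is invented for illustration.

# Hypothetical host-hardware payload; all values are invented for illustration.
hw = {
    'identifier': 'Z-0000012345',    # digital_server_id used to find or create the host
    'name': 'virt-host-01',          # profile name for a newly created host
    'os': 'z/VM',                    # stored as the host's "os"
    'type': 'LPAR',                  # stored as the host's "release" and the CPU "type"
    'arch': 's390x-redhat-linux',    # server and CPU architecture
    'total_ifls': '4',               # drives the CPU count/socket_count comparison
}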
Example no. 22
    def __save(self, channel):
        if self.server.real:
            server_id = self.server["id"]
            self.server.save()
        else:  # create new entry
            self.gen_secret()
            server_id = self.getid()
            org_id = self.server["org_id"]

            if self.user:
                user_id = self.user.getid()
            else:
                user_id = None

            # some more default values
            self.server["auto_update"] = "N"
            if self.user and not self.server.has_key("creator_id"):
                # save the link to the user that created it if we have
                # that information
                self.server["creator_id"] = self.user.getid()
            # and create the server entry
            self.server.create(server_id)
            server_lib.create_server_setup(server_id, org_id)

            have_reg_token = rhnFlags.test("registration_token")

            # Handle virtualization specific bits
            if self.virt_uuid is not None and \
               self.virt_type is not None:
                rhnVirtualization._notify_guest(self.getid(),
                                                self.virt_uuid, self.virt_type)

            # if we're using a token, then the following channel
            # subscription request can allow no matches since the
            # token code will fix up or fail miserably later.
            # subscribe the server to applicable channels

            # bretm 02/17/2007 -- TODO:  refactor activation key codepaths
            # to allow us to not have to pass in none_ok=1 in any case
            #
            # This can now throw exceptions which will be caught at a higher level
            if channel is not None:
                channel_info = dict(rhnChannel.channel_info(channel))
                log_debug(4, "eus channel id %s" % str(channel_info))
                rhnChannel._subscribe_sql(server_id, channel_info['id'])
            else:
                rhnChannel.subscribe_server_channels(self,
                                                     none_ok=have_reg_token,
                                                     user_id=user_id)

            if not have_reg_token:
                # Attempt to auto-entitle, can throw the following exceptions:
                #   rhnSystemEntitlementException
                #   rhnNoSystemEntitlementsException
                self.autoentitle()

                # If a new server was registered by a user (i.e. not with a
                # registration token), look for this user's default groups
                self.join_groups()

            server_lib.join_rhn(org_id)
        # Update the uuid - but don't commit yet
        self.update_uuid(self.uuid, commit=0)

        self.create_perm_cache()
        # And save the extra profile data...
        self.save_packages_byid(server_id, schedule=1)
        self.save_hardware_byid(server_id)
        self.save_history_byid(server_id)
        return 0
Example no. 23
    def __getSessionToken(self):
        """ Get/test-for session token in headers (rhnFlags) """
        log_debug(1)
        if not rhnFlags.test("AUTH_SESSION_TOKEN"):
            raise rhnFault(33, "Missing session token")
        return rhnFlags.get("AUTH_SESSION_TOKEN")
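This helper only works because an earlier stage of the request pipeline has stored the parsed header under the same flag name. A tiny hedged illustration of the rhnFlags set/test/get contract it assumes, with rhnFlags imported as in the surrounding snippets and the values invented:

# Hypothetical illustration of the rhnFlags contract assumed above.
parsed_token = "session-token-from-the-X-RHN-header"    # invented value
rhnFlags.set("AUTH_SESSION_TOKEN", parsed_token)        # done earlier, e.g. by the auth layer

if rhnFlags.test("AUTH_SESSION_TOKEN"):                 # what __getSessionToken checks
    token = rhnFlags.get("AUTH_SESSION_TOKEN")          # and what it returns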
Example no. 24
    def response(self, req, response):
        """ send the response (common code) """

        # Send the xml-rpc response back
        log_debug(5, "Response type", type(response))

        needs_xmlrpc_encoding = rhnFlags.test("NeedEncoding")
        compress_response = rhnFlags.test("compress_response")
        # Init an output object; we'll use it for sending data in various
        # formats
        if isinstance(response, rpclib.transports.File):
            if not hasattr(response.file_obj, 'fileno') and compress_response:
                # This is a StringIO that has to be compressed, so read it in
                # memory; mark that we don't have to do any xmlrpc encoding
                response = response.file_obj.read()
                needs_xmlrpc_encoding = 0
            else:
                # Just treat it as a file
                return self.response_file(req, response)

        is_fault = 0
        if isinstance(response, rhnFault):
            if req.method == 'GET':
                return self._response_fault_get(req, response.getxml())
            # Need to encode the response as xmlrpc
            response = response.getxml()
            is_fault = 1
            # No compression
            compress_response = 0
            # This is an xmlrpc Fault, so we have to encode it
            needs_xmlrpc_encoding = 1

        output = rpclib.transports.Output()

        if not is_fault:
            # First, use the same encoding/transfer that the client used
            output.set_transport_flags(
                transfer=rpclib.transports.lookupTransfer(self.input.transfer),
                encoding=rpclib.transports.lookupEncoding(self.input.encoding))

        if compress_response:
            # check if we have to compress this result
            log_debug(4, "Compression on for client version", self.clientVersion)
            if self.clientVersion > 0:
                output.set_transport_flags(output.TRANSFER_BINARY,
                                           output.ENCODE_ZLIB)
            else:  # original clients had the binary transport support broken
                output.set_transport_flags(output.TRANSFER_BASE64,
                                           output.ENCODE_ZLIB)

        # We simply add the transport options to the output headers
        output.headers.update(rhnFlags.get('outputTransportOptions').dict())

        if needs_xmlrpc_encoding:
            # Normalize the response
            response = self.normalize(response)
            try:
                response = rpclib.xmlrpclib.dumps(response, methodresponse=1)
            except TypeError as e:
                log_debug(-1, "Error \"%s\" encoding response = %s" % (e, response))
                Traceback("apacheHandler.response", req,
                          extra="Error \"%s\" encoding response = %s" % (e, response),
                          severity="notification")
                return apache.HTTP_INTERNAL_SERVER_ERROR
            except Exception:  # pylint: disable=E0012, W0703
                # Uncaught exception; signal the error
                Traceback("apacheHandler.response", req,
                          severity="unhandled")
                return apache.HTTP_INTERNAL_SERVER_ERROR
Example no. 25
    def new_system_user_pass(self, profile_name, os_release_name,
                             version, arch, username,
                             password, other):
        """ Registers a new system to an org specified by a username, password, and
            optionally an org id.

            New for RHEL 5.

            All args are strings except other.
            other is a dict with:
            * org_id - optional. Must be a string that contains the number. If it's
            not given, the default org is used.
            * reg_num - optional. It should be an EN. It will not be activated. It's
            used for automatic subscription to child channels and for deciding which
            service level to entitle the machine to (management, provisioning, etc.).
            If not given, the machine will only be registered to a base channel and
            entitled to the highest level possible.

            If a profile is created it will return a dict with:
            * system_id - the same xml as was previously returned
            * channels - a list of the channels (as strings) the system was
              subscribed to
            * failed_channels - a list of channels (as strings) that
              the system should have been subscribed to but couldn't be because they
              don't have the necessary entitlements available. Can contain all the
              channels including the base channel.
            * system_slots - a list of the system slots used (as strings).
            * failed_system_slots - a list of system slots (as strings) that the
              system should have used but couldn't because there weren't enough
              entitlements available
            * universal_activation_key - a list of universal default activation keys
              (as strings) that were used while registering.
            Allowable slots are 'enterprise_entitled' (management), 'sw_mgr_entitled'
            (updates), 'monitoring_entitled' (monitoring add-on to management), and
            'provisioning_entitled' (provisioning add-on to management).
            The call will try to use the highest system slot available. An entry will
            be added to failed_system_slots for each slot that is tried and fails,
            and system_slots will contain the one that succeeded, if any.
            E.g.: calling this on hosted with no reg num and only update entitlements
            will result in system_slots containing 'sw_mgr_entitled' and
            failed_system_slots containing 'enterprise_entitled'.

            If an error occurs which prevents the creation of a profile, a fault will
            be raised:
            TODO
        """

        add_to_seclist(password)

        log_debug(4,'in new_system_user_pass')

        # release_name wasn't required in the old call, so I'm just going to
        # add it to other
        other['release_name'] = os_release_name

        # Authorize the username and password. Save the returned user object.
        user = self.validate_system_user(username, password)

        # This creates the rhnServer record and commits it to the db.
        # It also assigns the system a base channel.
        server_data = self.create_system(user, profile_name,
                                         version,
                                         arch,
                                         other)
        # Save the returned Server object
        newserv = server_data['server']

        # Get the server db id.
        server_id = newserv.getid()

        # Get the server certificate file
        system_certificate = newserv.system_id()

        log_debug(4, 'Server id created as %s' % server_id)

        failures = []
        unknowns = []

        # Build our return values.
        attempted_channels = []
        successful_channels = []
        failed_channels = []

        actual_channels = rhnChannel.channels_for_server(server_id)
        for channel in actual_channels:
            successful_channels.append(channel['label'])

        # If we don't have any successful channels, we know the base channel
        # failed.
        if len(successful_channels) == 0:
            log_debug(4, 'System %s not subscribed to any channels' % server_id)

            # Look up the base channel, and store it as a failure.
            try:
                base = rhnChannel.get_channel_for_release_arch(
                                                        version,
                                                        arch, newserv['org_id'])
                failed_channels.append(base['label'])
            # We want to swallow exceptions here as we are just generating data
            # for the review screen in rhn_register.
            except:
                pass

        # Store any of our child channel failures
        failed_channels = failed_channels + failures

        attempted_system_slots = ['enterprise_entitled', 'sw_mgr_entitled']
        successful_system_slots = server_lib.check_entitlement(server_id)
        successful_system_slots = successful_system_slots.keys()
        failed_system_slots = []

        # Check which entitlement level we got, starting with the highest.
        i = 0
        for slot in attempted_system_slots:
            if slot in successful_system_slots:
                break
            i = i + 1

        # Any entitlements we didn't have, we'll store as a failure.
        failed_system_slots = attempted_system_slots[0:i]

        universal_activation_key = []
        if rhnFlags.test("universal_registration_token"):
            token = rhnFlags.get("universal_registration_token")
            universal_activation_key = token.get_tokens()

        return { 'system_id' : system_certificate,
                 'channels' : successful_channels,
                 'failed_channels' : failed_channels,
                 'failed_options' : unknowns,
                 'system_slots' : successful_system_slots,
                 'failed_system_slots' : failed_system_slots,
                 'universal_activation_key' : universal_activation_key
                 }
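For the scenario the docstring calls out (hosted, no reg num, only update entitlements available), the returned structure would look roughly like the following; every value is made up for illustration.

# Hypothetical return value; all values are invented for illustration.
example_result = {
    'system_id': '<?xml version="1.0"?><params>...</params>',  # systemid certificate XML
    'channels': ['rhel-i386-server-5'],              # base channel subscription succeeded
    'failed_channels': [],                           # nothing failed in this scenario
    'failed_options': [],
    'system_slots': ['sw_mgr_entitled'],             # the update entitlement that was granted
    'failed_system_slots': ['enterprise_entitled'],  # management was tried first and failed
    'universal_activation_key': [],                  # no universal default key in play
}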
Example no. 27
    def response(self, req, response):
        """ send the response (common code) """

        # Send the xml-rpc response back
        log_debug(5, "Response type", type(response))

        needs_xmlrpc_encoding = rhnFlags.test("NeedEncoding")
        compress_response = rhnFlags.test("compress_response")
        # Init an output object; we'll use it for sending data in various
        # formats
        if isinstance(response, rpclib.transports.File):
            if not hasattr(response.file_obj, 'fileno') and compress_response:
                # This is a StringIO that has to be compressed, so read it in
                # memory; mark that we don't have to do any xmlrpc encoding
                response = response.file_obj.read()
                needs_xmlrpc_encoding = 0
            else:
                # Just treat it as a file
                return self.response_file(req, response)

        is_fault = 0
        if isinstance(response, rhnFault):
            if req.method == 'GET':
                return self._response_fault_get(req, response.getxml())
            # Need to encode the response as xmlrpc
            response = response.getxml()
            is_fault = 1
            # No compression
            compress_response = 0
            # This is an xmlrpc Fault, so we have to encode it
            needs_xmlrpc_encoding = 1

        output = rpclib.transports.Output()

        if not is_fault:
            # First, use the same encoding/transfer that the client used
            output.set_transport_flags(
                transfer=rpclib.transports.lookupTransfer(self.input.transfer),
                encoding=rpclib.transports.lookupEncoding(self.input.encoding))

        if compress_response:
            # check if we have to compress this result
            log_debug(4, "Compression on for client version", self.clientVersion)
            if self.clientVersion > 0:
                output.set_transport_flags(output.TRANSFER_BINARY,
                                           output.ENCODE_ZLIB)
            else:  # original clients had the binary transport support broken
                output.set_transport_flags(output.TRANSFER_BASE64,
                                           output.ENCODE_ZLIB)

        # We simply add the transport options to the output headers
        output.headers.update(rhnFlags.get('outputTransportOptions').dict())

        if needs_xmlrpc_encoding:
            # Normalize the response
            response = self.normalize(response)
            try:
                response = rpclib.xmlrpclib.dumps(response, methodresponse=1)
            except TypeError as e:
                log_debug(-1, "Error \"%s\" encoding response = %s" % (e, response))
                Traceback("apacheHandler.response", req,
                          extra="Error \"%s\" encoding response = %s" % (e, response),
                          severity="notification")
                return apache.HTTP_INTERNAL_SERVER_ERROR
            except Exception:  # pylint: disable=E0012, W0703
                # Uncaught exception; signal the error
                Traceback("apacheHandler.response", req,
                          severity="unhandled")
                return apache.HTTP_INTERNAL_SERVER_ERROR

        # we're about done here, patch up the headers
        output.process(response)
        # Copy the rest of the fields
        for k, v in list(output.headers.items()):
            if k.lower() == 'content-type':
                # Content-type
                req.content_type = v
            else:
                setHeaderValue(req.headers_out, k, v)

        if CFG.DEBUG == 4:
            # I wrap this in an "if" so we don't parse a large file for no reason.
            log_debug(4, "The response: %s[...SNIP (for sanity) SNIP...]%s" %
                      (response[:100], response[-100:]))
        elif CFG.DEBUG >= 5:
            # if you absolutely must have that whole response in the log file
            log_debug(5, "The response: %s" % response)

        # send the headers
        req.send_http_header()
        try:
            # XXX: in case data is really large maybe we should split
            # it in smaller chunks instead of blasting everything at
            # once. Not yet a problem...
            req.write(output.data)
        except IOError:
            # send_http_header is already sent, so it doesn't make a lot of
            # sense to return a non-200 error; but there is no better solution
            return apache.HTTP_BAD_REQUEST
        del output
        return apache.OK
Example no. 28
def store_client_route(server_id):
    """ Stores the route the client took to get to hosted or the Satellite """

    log_debug(5, server_id)

    # get the old routing information for this server_id
    # oldRoute in this format: [(id0, hostname0),  (id1, hostname1),  ...]
    #                           closest to client, ..., closest to server
    h = rhnSQL.prepare("""
        select position,
               proxy_server_id,
               hostname
          from rhnServerPath
         where server_id = :server_id
        order by position
        """)
    h.execute(server_id=server_id)
    oldRoute = h.fetchall_dict() or []
    newRoute = []

    # construct oldRouteTuples from oldRoute, to get the actual format described above
    oldRouteTuples = []
    for oldRouteDict in oldRoute:
        oldRouteTuples.append((str(oldRouteDict['proxy_server_id']), oldRouteDict['hostname']))

    # code block if there *is* routing info in the headers
    # NOTE: X-RHN-Proxy-Auth described in proxy/broker/rhnProxyAuth.py
    if rhnFlags.test('X-RHN-Proxy-Auth'):
        tokens = string.split(rhnFlags.get('X-RHN-Proxy-Auth'), ',')
        tokens = [token for token in tokens if token]

        log_debug(4, "route tokens", tokens)
        # newRoute in this format: [(id0, hostname0),  (id1, hostname1),  ...]
        #                           closest to client, ..., closest to server
        for token in tokens:
            token, hostname = apacheAuth.splitProxyAuthToken(token)
            if hostname is None:
                log_debug(3, "NOTE: Spacewalk Proxy v1.1 detected - route tracking is unsupported")
                newRoute = []
                break
            newRoute.append((token[0], hostname))

        log_debug(4, "newRoute", newRoute)

    if oldRouteTuples == newRoute:
        # Nothing to do here
        # This also catches the case of no routes at all
        return

    if oldRouteTuples:
        # blow away table rhnServerPath entries for server_id
        log_debug(8, 'blow away route-info for %s' % server_id)
        h = rhnSQL.prepare("""
            delete from rhnServerPath where server_id = :server_id
        """)
        h.execute(server_id=server_id)

    if not newRoute:
        log_debug(3, "No new route to add")
        rhnSQL.commit()
        return

    log_debug(8, 'adding route-info entries: %s - %s' % (server_id, newRoute))

    h = rhnSQL.prepare("""
        insert into rhnServerPath
               (server_id, proxy_server_id, position, hostname)
        values (:server_id, :proxy_server_id, :position, :hostname)
    """)
    server_ids = []
    proxy_ids = []
    proxy_hostnames = []
    positions = []
    counter = 0
    for p in newRoute:
        proxy_id, proxy_hostname = p[:2]
        proxy_ids.append(proxy_id)
        proxy_hostnames.append(proxy_hostname)
        server_ids.append(server_id)
        positions.append(counter)
        counter = counter + 1

    log_debug(5, server_ids, proxy_ids, positions,
              proxy_hostnames)
    h.executemany(server_id=server_ids, proxy_server_id=proxy_ids,
                  position=positions, hostname=proxy_hostnames)

    rhnSQL.commit()
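To make the route format concrete: for a client that reached the server through two chained proxies, the decoded header would produce something like the list below (proxy IDs and hostnames invented), and the executemany() call then writes one rhnServerPath row per hop.

# Hypothetical decoded route; proxy IDs and hostnames are invented for illustration.
newRoute = [
    ('1000010001', 'proxy-near-client.example.com'),   # position 0, closest to the client
    ('1000010002', 'proxy-near-server.example.com'),   # position 1, closest to the server
]
# Resulting rhnServerPath rows for this server_id:
#   proxy_server_id | position | hostname
#   1000010001      | 0        | proxy-near-client.example.com
#   1000010002      | 1        | proxy-near-server.example.com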
Example no. 29
    def create_system(self, user, profile_name, release_version,
                                  architecture, data):
        """
        Create a system based on the input parameters.

        Return dict containing a server object for now.
        Called by new_system (< rhel5)
              and new_system_user_pass | new_system_activation_key (>= rhel5)
        """

        if profile_name is not None and not \
           rhnFlags.test("re_registration_token") and \
           len(profile_name) < 1:
            raise rhnFault(800)

        # log entry point
        if data.has_key("token"):
            log_item = "token = '%s'" % data["token"]
        else:
            log_item = "username = '******'" % user.username

        log_debug(1, log_item, release_version, architecture)

        # Fetch the applet's UUID
        if data.has_key("uuid"):
            applet_uuid = data['uuid']
            log_debug(3, "applet uuid", applet_uuid)
        else:
            applet_uuid = None

        # Fetch the up2date UUID
        if data.has_key("rhnuuid"):
            up2date_uuid = data['rhnuuid']
            log_debug(3, "up2date uuid", up2date_uuid)
            # XXX Should somehow check the uuid uniqueness
            #raise rhnFault(105, "A system cannot be registered multiple times")
        else:
            up2date_uuid = None

        release = str(release_version)

        if data.has_key('token'):
            token_string = data['token']
            # Look the token up; if the token does not exist or is invalid,
            # stop right here (search_token raises the appropriate rhnFault)
            tokens_obj = rhnServer.search_token(token_string)
            log_user_id = tokens_obj.get_user_id()
        else:
            # user should not be null here
            log_user_id = user.getid()
            tokens_obj = rhnServer.search_org_token(user.contact["org_id"])
            log_debug(3,"universal_registration_token set as %s" %
                        str(tokens_obj.get_tokens()))
            rhnFlags.set("universal_registration_token", tokens_obj)

        if data.has_key('channel') and len(data['channel']) > 0:
            channel = data['channel']
            log_debug(3, "requested EUS channel: %s" % str(channel))
        else:
            channel = None

        newserv = None
        if tokens_obj:
            # Only set registration_token if we have token(s) available.
            # server_token.ActivationTokens.__nonzero__ should do the right
            # thing of filtering the case of no tokens
            rhnFlags.set("registration_token", tokens_obj)
            # Is the token associated with a server?
            if tokens_obj.is_rereg_token:
                # Also flag it's a re-registration token
                rhnFlags.set("re_registration_token", tokens_obj)
                # Load the server object
                newserv = rhnServer.search(tokens_obj.get_server_id())
                newserv.disable_token()
                # The old hardware info no longer applies
                newserv.delete_hardware()
                # Update the arch - it may have changed; we know the field was
                # provided for us
                newserv.set_arch(architecture)
                newserv.user = rhnUser.User("", "")
                newserv.user.reload(newserv.server['creator_id'])
                # Generate a new secret for this server
                newserv.gen_secret()
                # Get rid of the old package profile - it's bogus in this case
                newserv.dispose_packages()
                # The new server may have a different base channel
                newserv.change_base_channel(release)

        if newserv is None:
            # Not a re-registration token, we need a fresh server object
            rhnSQL.set_log_auth(log_user_id)
            newserv = rhnServer.Server(user, architecture)


        # Proceed with using the rest of the data
        newserv.server["release"] = release
        if data.has_key('release_name'):
            newserv.server["os"] = data['release_name']

        # add the package list
        if data.has_key('packages'):
            for package in data['packages']:
                newserv.add_package(package)
        # add the hardware profile
        if data.has_key('hardware_profile'):
            for hw in data['hardware_profile']:
                newserv.add_hardware(hw)
        # fill in the other details from the data dictionary
        if profile_name is not None and not \
           rhnFlags.test("re_registration_token"):
            newserv.server["name"] = profile_name[:128]
        if data.has_key("os"):
            newserv.server["os"] = data["os"][:64]
        if data.has_key("description"):
            newserv.server["description"] = data["description"][:256]
        else:
            newserv.default_description()

        # Check for virt params
        # Get the uuid, if there is one.
        if data.has_key('virt_uuid'):
            virt_uuid = data['virt_uuid']
            if virt_uuid is not None \
               and not rhnVirtualization.is_host_uuid(virt_uuid):
                # If we don't have a virt_type key, we'll assume PARA.
                virt_type = None
                if data.has_key('virt_type'):
                    virt_type = data['virt_type']
                    if virt_type == 'para':
                        virt_type = rhnVirtualization.VirtualizationType.PARA
                    elif virt_type == 'fully':
                        virt_type = rhnVirtualization.VirtualizationType.FULLY
                    else:
                        raise Exception(
                            "Unknown virtualization type: %s" % virt_type)
                else:
                    raise Exception("Virtualization type not provided")
                newserv.virt_uuid = virt_uuid
                newserv.virt_type = virt_type
            else:
                newserv.virt_uuid = None
                newserv.virt_type = None
        else:
            newserv.virt_uuid = None
            newserv.virt_type = None


        # If we didn't find virt info from xen, check smbios
        if data.has_key('smbios') and newserv.virt_uuid is None:
            (newserv.virt_type, newserv.virt_uuid) = \
                    parse_smbios(data['smbios'])

        if tokens_obj.forget_rereg_token:
            # At this point we retained the server with re-activation
            # let the stacked activation keys do their magic
            tokens_obj.is_rereg_token = 0
            rhnFlags.set("re_registration_token", 0)

        # now if we have a token, load the extra registration
        # information from the token
        if rhnFlags.test("registration_token"):
            # Keep the original "info" field
            newserv.load_token()
            # we need to flush the registration information into the
            # database so we can proceed with processing the rest of
            # the token information (like subscribing the server to
            # groups, channels, etc)

            # bretm 02/19/2007 -- this shouldn't throw any of the following:
            #   SubscriptionCountExceeded
            #   BaseChannelDeniedError
            #   NoBaseChannelError
            # since we have the token object, and underneath the hood, we have none_ok=have_token

            # BUT - it does. So catch them and throw. this will make rhnreg_ks
            # die out, but oh well. at least they don't end up registered, and
            # without a base channel.
            try:
                # don't commit
                newserv.save(0, channel)
            except (rhnChannel.SubscriptionCountExceeded,
                    rhnChannel.NoBaseChannelError), channel_error:
                raise rhnFault(70), None, sys.exc_info()[2]
            except rhnChannel.BaseChannelDeniedError, channel_error:
                raise rhnFault(71), None, sys.exc_info()[2]
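The data dictionary that create_system consumes carries everything the client sent at registration time. Below is a hypothetical payload assembled only from the keys the method actually reads; all values are invented for illustration.

# Hypothetical registration payload; every value is invented for illustration.
data = {
    'token': '1-exampleactivationkey',      # omit to register with username/password instead
    'uuid': '0f1e2d3c-0000-0000-0000-000000000001',     # applet UUID
    'rhnuuid': 'a1b2c3d4-0000-0000-0000-000000000002',  # up2date UUID
    'channel': 'rhel-x86_64-server-5.3.z',  # optional explicit EUS channel request
    'release_name': 'redhat-release-server',
    'os': 'Red Hat Enterprise Linux Server',
    'description': 'registered by an illustration script',
    'packages': [],                         # entries are passed one by one to add_package()
    'hardware_profile': [],                 # entries are passed one by one to add_hardware()
    'virt_uuid': None,                      # set for para/fully virtualized guests
    'virt_type': None,                      # 'para' or 'fully' when virt_uuid is present
    'smbios': {},                           # consulted via parse_smbios() only if virt_uuid is absent
}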