Example #1
 def __init__ (self, filesystem):
     self.filesystem = filesystem
     # count total hits
     self.hit_counter = counter()
     # count file deliveries
     self.file_counter = counter()
     # count cache hits
     self.cache_counter = counter()
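The examples on this page all instantiate the same counter class from Medusa's counter module (imported either directly as counter() or qualified as counter.counter()). As a rough sketch, a counter compatible with the calls that appear in these examples (increment() with an optional delta, as in increment(len(data)), plus as_long()) might look like the following; the real Medusa implementation may differ in detail:

class counter:
    """Minimal statistics counter (a sketch; the real Medusa class may differ)."""

    def __init__(self, initial_value=0):
        self.value = initial_value

    def increment(self, delta=1):
        # bump the counter and return the new total; several examples use the
        # return value directly as a channel or request number
        self.value = self.value + delta
        return self.value

    def as_long(self):
        # the examples read the running total through as_long()
        return int(self.value)

    def __repr__(self):
        return '<counter value=%s>' % self.value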
Example #2
    def __init__ (self, ip, port, resolver=None, logger_object=None):
        self.ip = ip
        self.port = port
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)

        self.handlers = []

        self.set_reuse_addr()
        self.bind ((ip, port))

        # lower this to 5 if your OS complains
        self.listen (1024)

        host, port = self.socket.getsockname()
        if not ip:
            self.log_info('Computing default hostname', 'warning')
            ip = socket.gethostbyname (socket.gethostname())
        try:
            self.server_name = socket.gethostbyaddr (ip)[0]
        except socket.error:
            self.log_info('Cannot do reverse lookup', 'warning')
            self.server_name = ip       # use the IP address as the "hostname"

        self.server_port = port
        self.total_clients = counter()
        self.total_requests = counter()
        self.exceptions = counter()
        self.bytes_out = counter()
        self.bytes_in  = counter()

        if not logger_object:
            logger_object = logger.file_logger (sys.stdout)

        if resolver:
            self.logger = logger.resolving_logger (resolver, logger_object)
        else:
            self.logger = logger.unresolving_logger (logger_object)

        self.log_info (
                'Medusa (V%s) started at %s'
                '\n\tHostname: %s'
                '\n\tPort:%d'
                '\n' % (
                        VERSION_STRING,
                        time.ctime(time.time()),
                        self.server_name,
                        port,
                        )
                )
Example #3
 def __init__ (self, hostname='127.0.0.1', port=8023):
     asyncore.dispatcher.__init__(self)
     self.hostname = hostname
     self.port = port
     self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
     self.set_reuse_addr()
     self.bind ((hostname, port))
     self.log_info('%s started on port %d' % (self.SERVER_IDENT, port))
     self.listen (5)
     self.closed = 0
     self.failed_auths = 0
     self.total_sessions = counter()
     self.closed_sessions = counter()
Example #4
 def __init__ (self, server, conn, addr):
     self.channel_number = http_channel.channel_counter.increment()
     self.request_counter = counter()
     asynchat.async_chat.__init__ (self, conn)
     self.server = server
     self.addr = addr
     self.set_terminator ('\r\n\r\n')
     self.in_buffer = ''
     self.creation_time = int (time.time())
     self.check_maintenance()
Example #5
    def postbind(self):
        from supervisor.medusa.counter import counter
        from supervisor.medusa.http_server import VERSION_STRING

        self.listen(1024)

        self.total_clients = counter()
        self.total_requests = counter()
        self.exceptions = counter()
        self.bytes_out = counter()
        self.bytes_in  = counter()

        self.log_info (
                'Medusa (V%s) started at %s'
                '\n\tHostname: %s'
                '\n\tPort:%s'
                '\n' % (
                        VERSION_STRING,
                        time.ctime(time.time()),
                        self.server_name,
                        self.port,
                        )
                )
Example #6
 def __init__ (self, server, sock, addr):
     asynchat.async_chat.__init__ (self, sock)
     self.server = server
     self.addr = addr
     self.set_terminator ('\r\n')
     self.data = ''
     # local bindings specific to this channel
     self.local_env = {}
     # send timestamp string
     self.timestamp = str(time.time())
     self.count = 0
     self.line_counter = counter()
     self.number = int(server.total_sessions.as_long())
     self.multi_line = []
     self.push (self.timestamp + '\r\n')
Example #7
 def __init__ (self, server, sock, addr):
     asynchat.async_chat.__init__ (self, sock)
     self.server = server
     self.addr = addr
     self.set_terminator ('\r\n')
     self.data = ''
     # local bindings specific to this channel
     self.local_env = sys.modules['__main__'].__dict__.copy()
     self.push ('Python ' + sys.version + '\r\n')
     self.push (sys.copyright+'\r\n')
     self.push ('Welcome to %s\r\n' % self)
     self.push ("[Hint: try 'from __main__ import *']\r\n")
     self.prompt()
     self.number = server.total_sessions.as_long()
     self.line_counter = counter()
     self.multi_line = []
Example #8
    def __init__ (
            self,
            authorizer,
            hostname        =None,
            ip              ='',
            port            =21,
            resolver        =None,
            logger_object=logger.file_logger (sys.stdout)
            ):
        self.ip = ip
        self.port = port
        self.authorizer = authorizer

        if hostname is None:
            self.hostname = socket.gethostname()
        else:
            self.hostname = hostname

        # statistics
        self.total_sessions = counter()
        self.closed_sessions = counter()
        self.total_files_out = counter()
        self.total_files_in = counter()
        self.total_bytes_out = counter()
        self.total_bytes_in = counter()
        self.total_exceptions = counter()
        #
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)

        self.set_reuse_addr()
        self.bind ((self.ip, self.port))
        self.listen (5)

        if not logger_object:
            logger_object = sys.stdout

        if resolver:
            self.logger = logger.resolving_logger (resolver, logger_object)
        else:
            self.logger = logger.unresolving_logger (logger_object)

        self.log_info('FTP server started at %s\n\tAuthorizer:%s\n\tHostname: %s\n\tPort: %d' % (
                time.ctime(time.time()),
                repr (self.authorizer),
                self.hostname,
                self.port)
                )
Example #9
class http_channel(asynchat.async_chat):

    # use a larger default output buffer
    ac_out_buffer_size = 1 << 16

    current_request = None
    channel_counter = counter()

    def __init__(self, server, conn, addr):
        self.channel_number = http_channel.channel_counter.increment()
        self.request_counter = counter()
        asynchat.async_chat.__init__(self, conn)
        self.server = server
        self.addr = addr
        self.set_terminator('\r\n\r\n')
        self.in_buffer = ''
        self.creation_time = int(time.time())
        self.last_used = self.creation_time
        self.check_maintenance()

    def __repr__(self):
        ar = asynchat.async_chat.__repr__(self)[1:-1]
        return '<%s channel#: %s requests:%s>' % (ar, self.channel_number,
                                                  self.request_counter)

    # Channel Counter, Maintenance Interval...
    maintenance_interval = 500

    def check_maintenance(self):
        if not self.channel_number % self.maintenance_interval:
            self.maintenance()

    def maintenance(self):
        self.kill_zombies()

    # 30-minute zombie timeout.  status_handler also knows how to kill zombies.
    zombie_timeout = 30 * 60

    def kill_zombies(self):
        now = int(time.time())
        for channel in asyncore.socket_map.values():
            if channel.__class__ == self.__class__:
                if (now - channel.last_used) > channel.zombie_timeout:
                    channel.close()

    # --------------------------------------------------
    # send/recv overrides, good place for instrumentation.
    # --------------------------------------------------

    # this information needs to get into the request object,
    # so that it may log correctly.
    def send(self, data):
        result = asynchat.async_chat.send(self, data)
        self.server.bytes_out.increment(len(data))
        self.last_used = int(time.time())
        return result

    def recv(self, buffer_size):
        try:
            result = asynchat.async_chat.recv(self, buffer_size)
            self.server.bytes_in.increment(len(result))
            self.last_used = int(time.time())
            return result
        except MemoryError:
            # --- Save a Trip to Your Service Provider ---
            # It's possible for a process to eat up all the memory of
            # the machine, and put it in an extremely wedged state,
            # where medusa keeps running and can't be shut down.  This
            # is where MemoryError tends to get thrown, though of
            # course it could get thrown elsewhere.
            sys.exit("Out of Memory!")

    def handle_error(self):
        t, v = sys.exc_info()[:2]
        if t is SystemExit:
            raise t(v)
        else:
            asynchat.async_chat.handle_error(self)

    def log(self, *args):
        pass

    # --------------------------------------------------
    # async_chat methods
    # --------------------------------------------------

    def collect_incoming_data(self, data):
        if self.current_request:
            # we are receiving data (probably POST data) for a request
            self.current_request.collect_incoming_data(data)
        else:
            # we are receiving header (request) data
            self.in_buffer = self.in_buffer + data

    def found_terminator(self):
        if self.current_request:
            self.current_request.found_terminator()
        else:
            header = self.in_buffer
            self.in_buffer = ''
            lines = header.split('\r\n')

            # --------------------------------------------------
            # crack the request header
            # --------------------------------------------------

            while lines and not lines[0]:
                # as per the suggestion of http-1.1 section 4.1 (and
                # Eric Parker <*****@*****.**>), ignore leading
                # blank lines (buggy browsers tack them onto the end
                # of POST requests)
                lines = lines[1:]

            if not lines:
                self.close_when_done()
                return

            request = lines[0]

            command, uri, version = crack_request(request)
            header = join_headers(lines[1:])

            # unquote path if necessary (thanks to Skip Montanaro for pointing
            # out that we must unquote in piecemeal fashion).
            rpath, rquery = splitquery(uri)
            if '%' in rpath:
                if rquery:
                    uri = unquote(rpath) + '?' + rquery
                else:
                    uri = unquote(rpath)

            r = http_request(self, request, command, uri, version, header)
            self.request_counter.increment()
            self.server.total_requests.increment()

            if command is None:
                self.log_info('Bad HTTP request: %s' % repr(request), 'error')
                r.error(400)
                return

            # --------------------------------------------------
            # handler selection and dispatch
            # --------------------------------------------------
            for h in self.server.handlers:
                if h.match(r):
                    try:
                        self.current_request = r
                        # This isn't used anywhere.
                        # r.handler = h # CYCLE
                        h.handle_request(r)
                    except:
                        self.server.exceptions.increment()
                        (file, fun,
                         line), t, v, tbinfo = asyncore.compact_traceback()
                        self.log_info(
                            'Server Error: %s, %s: file: %s line: %s' %
                            (t, v, file, line), 'error')
                        try:
                            r.error(500)
                        except:
                            pass
                    return

            # no handlers, so complain
            r.error(404)

    def writable_for_proxy(self):
        # this version of writable supports the idea of a 'stalled' producer
        # [i.e., it's not ready to produce any output yet] This is needed by
        # the proxy, which will be waiting for the magic combination of
        # 1) hostname resolved
        # 2) connection made
        # 3) data available.
        if self.ac_out_buffer:
            return 1
        elif len(self.producer_fifo):
            p = self.producer_fifo.first()
            if hasattr(p, 'stalled'):
                return not p.stalled()
            else:
                return 1
Example #10
 def __init__ (self, filesystem):
     self.filesystem = filesystem
     self.hits = counter.counter()
     self.exceptions = counter.counter()
Example #11
class rpc_channel(asynchat.async_chat):
    """Simple RPC server."""

    # a 'packet': NNNNNNNNmmmmmmmmmmmmmmmm
    # (hex length in 8 bytes, followed by marshal'd packet data)
    # same protocol used in both directions.

    STATE_LENGTH = 'length state'
    STATE_PACKET = 'packet state'

    ac_out_buffer_size = 65536

    request_counter = counter()
    exception_counter = counter()
    client_counter = counter()

    def __init__(self, root, conn, addr):
        self.root = root
        self.addr = addr
        asynchat.async_chat.__init__(self, conn)
        self.pstate = self.STATE_LENGTH
        self.set_terminator(8)
        self.buffer = []
        self.proxies = {}
        rid = id(root)
        self.new_reference(root)
        p = marshal.dumps((rid, ))
        # send root oid to the other side
        self.push('%08x%s' % (len(p), p))
        self.client_counter.increment()

    def new_reference(self, object):
        oid = id(object)
        ignore, refcnt = self.proxies.get(oid, (None, 0))
        self.proxies[oid] = (object, refcnt + 1)

    def forget_reference(self, oid):
        object, refcnt = self.proxies.get(oid, (None, 0))
        if refcnt > 1:
            self.proxies[oid] = (object, refcnt - 1)
        else:
            del self.proxies[oid]

    def log(self, *ignore):
        pass

    def collect_incoming_data(self, data):
        self.buffer.append(data)

    def found_terminator(self):
        self.buffer, data = [], ''.join(self.buffer)

        if self.pstate is self.STATE_LENGTH:
            packet_length = int(data, 16)
            self.set_terminator(packet_length)
            self.pstate = self.STATE_PACKET
        else:

            self.set_terminator(8)
            self.pstate = self.STATE_LENGTH

            oid, kind, arg = marshal.loads(data)

            obj, refcnt = self.proxies[oid]
            reply_kind = 2

            try:
                if kind == 0:
                    # __call__
                    result = obj(*arg)
                elif kind == 1:
                    # __getattr__
                    result = getattr(obj, arg)
                elif kind == 2:
                    # __setattr__
                    key, value = arg
                    setattr(obj, key, value)
                    result = None
                elif kind == 3:
                    # __repr__
                    result = repr(obj)
                elif kind == 4:
                    # __del__
                    self.forget_reference(oid)
                    result = None
                elif kind == 5:
                    # __getitem__
                    result = obj[arg]
                elif kind == 6:
                    # __setitem__
                    (key, value) = arg
                    obj[key] = value
                    result = None
                elif kind == 7:
                    # __len__
                    result = len(obj)
                else:
                    result = None

            except:
                reply_kind = 1
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                result = '%s:%s:%s:%s (%s:%s)' % (MY_NAME, file, fun, line, t,
                                                  str(v))
                self.log_info(result, 'error')
                self.exception_counter.increment()

            self.request_counter.increment()

            try:
                rb = marshal.dumps((reply_kind, result))
            except ValueError:
                # unmarshallable object, return a reference
                rid = id(result)
                self.new_reference(result)
                rb = marshal.dumps((0, rid))

            self.push_with_producer(
                scanning_producer(('%08x' % len(rb)) + rb, buffer_size=65536))
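The comment at the top of rpc_channel describes the wire format: an 8-hex-digit length prefix followed by marshalled data, used identically in both directions. As a hedged sketch (not part of the example above, and with illustrative function names), client-side helpers that frame a request tuple and unframe a reply could look like this, following the same Python 2 string handling as the example:

import marshal

def frame_request(oid, kind, arg):
    # marshal the (oid, kind, arg) tuple and prepend its length as 8 hex
    # digits, mirroring the push('%08x%s' % (len(p), p)) call in __init__ above
    payload = marshal.dumps((oid, kind, arg))
    return '%08x%s' % (len(payload), payload)

def unframe_reply(packet):
    # strip the 8-hex-digit length header and unmarshal (reply_kind, result)
    length = int(packet[:8], 16)
    return marshal.loads(packet[8:8 + length])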
Example #12
 def __init__(self, request, file):
     self.buffer = ''
     self.request = request
     self.file = file
     self.got_header = 0
     self.bytes_out = counter.counter()
Example #13
 def __init__ (self, server='127.0.0.1'):
     resolver.__init__(self, server)
     self.cache = {}
     self.forward_requests = counter()
     self.reverse_requests = counter()
     self.cache_hits = counter()
Example #14
 def __init__ (self):
     self.modules = {}
     self.hits = counter.counter()
     self.exceptions = counter.counter()
Example #15
 def __init__ (self, filesystem):
     self.filesystem = filesystem
     self.hits = counter.counter()
     self.exceptions = counter.counter()
Example #16
class http_request:

    # default reply code
    reply_code = 200

    request_counter = counter()

    # Whether to automatically use chunked encoding when
    #
    #   HTTP version is 1.1
    #   Content-Length is not set
    #   Chunked encoding is not already in effect
    #
    # If your clients are having trouble, you might want to disable this.
    use_chunked = 1

    # by default, this request object ignores user data.
    collector = None

    def __init__(self, *args):
        # unpack information about the request
        (self.channel, self.request, self.command, self.uri, self.version,
         self.header) = args

        self.outgoing = []
        self.reply_headers = {
            'Server': 'Medusa/%s' % VERSION_STRING,
            'Date': http_date.build_http_date(time.time())
        }

        # New reply header list (to support multiple
        # headers with same name)
        self.__reply_header_list = []

        self.request_number = http_request.request_counter.increment()
        self._split_uri = None
        self._header_cache = {}

    # --------------------------------------------------
    # reply header management
    # --------------------------------------------------
    def __setitem__(self, key, value):
        self.reply_headers[key] = value

    def __getitem__(self, key):
        return self.reply_headers[key]

    def __contains__(self, key):
        return key in self.reply_headers

    def has_key(self, key):
        return key in self.reply_headers

    def build_reply_header(self):
        header_items = ['%s: %s' % item for item in self.reply_headers.items()]
        return '\r\n'.join([self.response(self.reply_code)] +
                           header_items) + '\r\n\r\n'

    ####################################################
    # multiple reply header management
    ####################################################
    # These are intended for allowing multiple occurrences
    # of the same header.
    # Usually you can fold such headers together, separating
    # their contents by a comma (e.g. Accept: text/html, text/plain)
    # but the big exception is the Set-Cookie header.
    # dictionary centric.
    #---------------------------------------------------

    def add_header(self, name, value):
        """ Adds a header to the reply headers """
        self.__reply_header_list.append((name, value))

    def clear_headers(self):
        """ Clears the reply header list """

        # Remove things from the old dict as well
        self.reply_headers.clear()

        self.__reply_header_list[:] = []

    def remove_header(self, name, value=None):
        """ Removes the specified header.
        If a value is provided, the name and
        value must match to remove the header.
        If the value is None, removes all headers
        with that name."""

        found_it = 0

        # Remove things from the old dict as well
        if (name in self.reply_headers
                and (value is None or self.reply_headers[name] == value)):
            del self.reply_headers[name]
            found_it = 1

        removed_headers = []
        if not value is None:
            if (name, value) in self.__reply_header_list:
                removed_headers = [(name, value)]
                found_it = 1
        else:
            for h in self.__reply_header_list:
                if h[0] == name:
                    removed_headers.append(h)
                    found_it = 1

        if not found_it:
            if value is None:
                search_value = "%s" % name
            else:
                search_value = "%s: %s" % (name, value)

            raise LookupError("Header '%s' not found" % search_value)

        for h in removed_headers:
            self.__reply_header_list.remove(h)

    def get_reply_headers(self):
        """ Get the tuple of headers that will be used
        for generating reply headers"""
        header_tuples = self.__reply_header_list[:]

        # The idea here is to insert the headers from
        # the old header dict into the new header list,
        # UNLESS there's already an entry in the list
        # that would have overwritten the dict entry
        # if the dict was the only storage...
        header_names = [n for n, v in header_tuples]
        for n, v in self.reply_headers.items():
            if n not in header_names:
                header_tuples.append((n, v))
                header_names.append(n)
        # Ok, that should do it.  Now, if there were any
        # headers in the dict that weren't in the list,
        # they should have been copied in.  If the name
        # was already in the list, we didn't copy it,
        # because the value from the dict has been
        # 'overwritten' by the one in the list.

        return header_tuples

    def get_reply_header_text(self):
        """ Gets the reply header (including status and
        additional crlf)"""

        header_tuples = self.get_reply_headers()

        headers = [self.response(self.reply_code)]
        headers += ["%s: %s" % h for h in header_tuples]
        return '\r\n'.join(headers) + '\r\n\r\n'

    #---------------------------------------------------
    # This is the end of the new reply header
    # management section.
    ####################################################

    # --------------------------------------------------
    # split a uri
    # --------------------------------------------------

    # <path>;<params>?<query>#<fragment>
    path_regex = re.compile(
        #      path      params    query   fragment
        r'([^;?#]*)(;[^?#]*)?(\?[^#]*)?(#.*)?')

    def split_uri(self):
        if self._split_uri is None:
            m = self.path_regex.match(self.uri)
            if m.end() != len(self.uri):
                raise ValueError("Broken URI")
            else:
                self._split_uri = m.groups()
        return self._split_uri

    def get_header_with_regex(self, head_reg, group):
        for line in self.header:
            m = head_reg.match(line)
            if m.end() == len(line):
                return m.group(group)
        return ''

    def get_header(self, header):
        header = header.lower()
        hc = self._header_cache
        if header not in hc:
            h = header + ': '
            hl = len(h)
            for line in self.header:
                if line[:hl].lower() == h:
                    r = line[hl:]
                    hc[header] = r
                    return r
            hc[header] = None
            return None
        else:
            return hc[header]

    # --------------------------------------------------
    # user data
    # --------------------------------------------------

    def collect_incoming_data(self, data):
        if self.collector:
            self.collector.collect_incoming_data(data)
        else:
            self.log_info(
                'Dropping %d bytes of incoming request data' % len(data),
                'warning')

    def found_terminator(self):
        if self.collector:
            self.collector.found_terminator()
        else:
            self.log_info('Unexpected end-of-record for incoming request',
                          'warning')

    def push(self, thing):
        if type(thing) == type(''):
            self.outgoing.append(
                producers.simple_producer(thing, buffer_size=len(thing)))
        else:
            self.outgoing.append(thing)

    def response(self, code=200):
        message = self.responses[code]
        self.reply_code = code
        return 'HTTP/%s %d %s' % (self.version, code, message)

    def error(self, code):
        self.reply_code = code
        message = self.responses[code]
        s = self.DEFAULT_ERROR_MESSAGE % {
            'code': code,
            'message': message,
        }
        self['Content-Length'] = len(s)
        self['Content-Type'] = 'text/html'
        # make an error reply
        self.push(s)
        self.done()

    # can also be used for empty replies
    reply_now = error

    def done(self):
        """finalize this transaction - send output to the http channel"""

        # ----------------------------------------
        # persistent connection management
        # ----------------------------------------

        #  --- BUCKLE UP! ----

        connection = get_header(CONNECTION, self.header).lower()

        close_it = 0
        wrap_in_chunking = 0

        if self.version == '1.0':
            if connection == 'keep-alive':
                if 'Content-Length' not in self:
                    close_it = 1
                else:
                    self['Connection'] = 'Keep-Alive'
            else:
                close_it = 1
        elif self.version == '1.1':
            if connection == 'close':
                close_it = 1
            elif 'Content-Length' not in self:
                if 'Transfer-Encoding' in self:
                    if not self['Transfer-Encoding'] == 'chunked':
                        close_it = 1
                elif self.use_chunked:
                    self['Transfer-Encoding'] = 'chunked'
                    wrap_in_chunking = 1
                else:
                    close_it = 1
        elif self.version is None:
            # Although we don't *really* support http/0.9 (because we'd have to
            # use \r\n as a terminator, and it would just yuck up a lot of stuff)
            # it's very common for developers to not want to type a version number
            # when using telnet to debug a server.
            close_it = 1

        outgoing_header = producers.simple_producer(
            self.get_reply_header_text())

        if close_it:
            self['Connection'] = 'close'

        if wrap_in_chunking:
            outgoing_producer = producers.chunked_producer(
                producers.composite_producer(self.outgoing))
            # prepend the header
            outgoing_producer = producers.composite_producer(
                [outgoing_header, outgoing_producer])
        else:
            # prepend the header
            self.outgoing.insert(0, outgoing_header)
            outgoing_producer = producers.composite_producer(self.outgoing)

        # apply a few final transformations to the output
        self.channel.push_with_producer(
            # globbing gives us large packets
            producers.globbing_producer(
                # hooking lets us log the number of bytes sent
                producers.hooked_producer(outgoing_producer, self.log)))

        self.channel.current_request = None

        if close_it:
            self.channel.close_when_done()

    def log_date_string(self, when):
        gmt = time.gmtime(when)
        if time.daylight and gmt[8]:
            tz = time.altzone
        else:
            tz = time.timezone
        if tz > 0:
            neg = 1
        else:
            neg = 0
            tz = -tz
        h, rem = divmod(tz, 3600)
        m, rem = divmod(rem, 60)
        if neg:
            offset = '-%02d%02d' % (h, m)
        else:
            offset = '+%02d%02d' % (h, m)

        return time.strftime('%d/%b/%Y:%H:%M:%S ', gmt) + offset

    def log(self, bytes):
        self.channel.server.logger.log(
            self.channel.addr[0], '%d - - [%s] "%s" %d %d\n' %
            (self.channel.addr[1], self.log_date_string(
                time.time()), self.request, self.reply_code, bytes))

    responses = {
        100: "Continue",
        101: "Switching Protocols",
        200: "OK",
        201: "Created",
        202: "Accepted",
        203: "Non-Authoritative Information",
        204: "No Content",
        205: "Reset Content",
        206: "Partial Content",
        300: "Multiple Choices",
        301: "Moved Permanently",
        302: "Moved Temporarily",
        303: "See Other",
        304: "Not Modified",
        305: "Use Proxy",
        400: "Bad Request",
        401: "Unauthorized",
        402: "Payment Required",
        403: "Forbidden",
        404: "Not Found",
        405: "Method Not Allowed",
        406: "Not Acceptable",
        407: "Proxy Authentication Required",
        408: "Request Time-out",
        409: "Conflict",
        410: "Gone",
        411: "Length Required",
        412: "Precondition Failed",
        413: "Request Entity Too Large",
        414: "Request-URI Too Large",
        415: "Unsupported Media Type",
        500: "Internal Server Error",
        501: "Not Implemented",
        502: "Bad Gateway",
        503: "Service Unavailable",
        504: "Gateway Time-out",
        505: "HTTP Version not supported"
    }

    # Default error message
    DEFAULT_ERROR_MESSAGE = '\r\n'.join(
        ('<head>', '<title>Error response</title>', '</head>', '<body>',
         '<h1>Error response</h1>', '<p>Error code %(code)d.',
         '<p>Message: %(message)s.', '</body>', ''))

    def log_info(self, msg, level):
        pass
Example #17
 def __init__ (self, channel, client_addr, fd):
     self.channel = channel
     self.client_addr = client_addr
     self.fd = fd
     asyncore.dispatcher.__init__ (self)
     self.bytes_in = counter()
Example #18
 def __init__ (self, request, file):
     self.buffer = ''
     self.request = request
     self.file = file
     self.got_header = 0
     self.bytes_out = counter.counter()
Example #19
 def __init__ (self, dict, handler, realm='default'):
     self.authorizer = dictionary_authorizer (dict)
     self.handler = handler
     self.realm = realm
     self.pass_count = counter.counter()
     self.fail_count = counter.counter()
Example #20
 def __init__ (self):
     self.modules = {}
     self.hits = counter.counter()
     self.exceptions = counter.counter()
Example #21
 def __init__(self, dict, handler, realm="default"):
     self.authorizer = dictionary_authorizer(dict)
     self.handler = handler
     self.realm = realm
     self.pass_count = counter.counter()
     self.fail_count = counter.counter()
Example #22
class resolver(asyncore.dispatcher):
    id = counter()

    def __init__(self, server='127.0.0.1'):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.server = server
        self.request_map = {}
        self.last_reap_time = int(time.time())  # reap every few minutes

    def writable(self):
        return 0

    def log(self, *args):
        pass

    def handle_close(self):
        self.log_info('closing!')
        self.close()

    def handle_error(self):  # don't close the connection on error
        file_fun_line, t, v, tbinfo = asyncore.compact_traceback()
        self.log_info('Problem with DNS lookup (%s:%s %s)' % (t, v, tbinfo),
                      'error')

    def get_id(self):
        return self.id.as_long() % (1 << 16)

    def reap(self):  # find DNS requests that have timed out
        now = int(time.time())
        if now - self.last_reap_time > 180:  # reap every 3 minutes
            self.last_reap_time = now  # update before we forget
            for k, (host, unpack, callback,
                    when) in list(self.request_map.items()):
                if now - when > 180:  # over 3 minutes old
                    del self.request_map[k]
                    try:  # same code as in handle_read
                        callback(host, 0, None)  # timeout val is (0,None)
                    except:
                        file_fun_line, t, v, tbinfo = asyncore.compact_traceback()
                        self.log_info('%s %s %s' % (t, v, tbinfo), 'error')

    def resolve(self, host, callback):
        self.reap()  # first, get rid of old guys
        self.socket.sendto(fast_address_request(host, self.get_id()),
                           (self.server, 53))
        self.request_map[self.get_id()] = (host, unpack_address_reply,
                                           callback, int(time.time()))
        self.id.increment()

    def resolve_ptr(self, host, callback):
        self.reap()  # first, get rid of old guys
        ip = host.split('.')
        ip.reverse()
        ip = '.'.join(ip) + '.in-addr.arpa'
        self.socket.sendto(fast_ptr_request(ip, self.get_id()),
                           (self.server, 53))
        self.request_map[self.get_id()] = (host, unpack_ptr_reply, callback,
                                           int(time.time()))
        self.id.increment()

    def handle_read(self):
        reply, whence = self.socket.recvfrom(512)
        # for security reasons we may want to double-check
        # that <whence> is the server we sent the request to.
        id = (ord(reply[0]) << 8) + ord(reply[1])
        if id in self.request_map:
            host, unpack, callback, when = self.request_map[id]
            del self.request_map[id]
            ttl, answer = unpack(reply)
            try:
                callback(host, ttl, answer)
            except:
                file_fun_line, t, v, tbinfo = asyncore.compact_traceback()
                self.log_info('%s %s %s' % (t, v, tbinfo), 'error')
Example #23
 def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):
     self.pattern = pattern
     self.redirect = redirect
     self.patreg = re.compile (pattern, regex_flag)
     self.hits = counter.counter()