Exemplo n.º 1
0
 def __init__(self, *args):
     """Set up per-handler state: filesystem handle, hit/file/cache
     counters, and the cache-control table mapping content categories
     to a max-age and the file extensions that belong to it."""
     self._filesystem = None
     # Independent counters for cache hits, files served and cache checks.
     self._hit_counter = Counter()
     self._file_counter = Counter()
     self._cache_counter = Counter()
     # (category, max_age, extensions) rows; 'others' is the catch-all.
     cache_rules = [
         ('html', ONE_DAY, ['html', 'htm', 'xml']),
         ('javascript', ONE_DAY, ['js', 'wjs']),
         ('css', ONE_DAY, ['css']),
         ('images', ONE_DAY, ['png', 'jpg', 'gif']),
         ('flash', NOT_CACHED, ['swf']),
         ('others', ONE_DAY, []),
     ]
     self.cache_info = {}
     for category, age, extensions in cache_rules:
         self.cache_info[category] = {'max_age': age,
                                      'extensions': extensions}
     super(FileRequestHandler, self).__init__(*args)
Exemplo n.º 2
0
 def _setup_counters(self):
     """Create one Counter instance attribute per tracked statistic."""
     statnames = ('request_counter', 'response_counter',
                  'bytes_out', 'bytes_in',
                  'read_calls', 'readable_calls',
                  'write_calls', 'writable_calls',
                  'buffer_refills', 'refill_skips')
     for statname in statnames:
         setattr(self, statname, Counter())
Exemplo n.º 3
0
 def __init__(self, monitor, timestamp):
     """Track executioner lifecycle bookkeeping for one stage run."""
     self.monitor = monitor
     self.timestamp = timestamp
     # Guards the stage bookkeeping lists created below.
     self.stagelock = Lock()
     # One empty bucket per lifecycle phase.
     for listname in ('stage_aborted', 'stage_executed', 'executioners',
                      'pending_executioners', 'aborted_executioners',
                      'executed_executioners', 'cancelled_executioners'):
         setattr(self, listname, [])
     self.stage_counter = Counter()
     self.active_executioners = Counter()
     self.monitor.debugout('Created %s' % self, 1)
Exemplo n.º 4
0
 def __init__(self, qid, iterator, **kw):
     """Wrap *iterator* as query *qid*; optional keywords: 'timeout'
     (seconds, default 300) and 'count' (batch size, default 1000)."""
     self.qid = qid
     self.complete = False
     self.iterator = iterator
     self.returned = Counter()
     # Optional keyword overrides with their defaults.
     self.timeout = kw.get("timeout", 300)
     self.default_count = kw.get("count", 1000)
     # Creation and last-touch timestamps start out identical.
     now = uptime.secs()
     self.created = now
     self.touched = now
     super(Query, self).__init__()
Exemplo n.º 5
0
 def __init__(self,
              ip,
              port,
              user_manager,
              realm,
              authentication,
              maintenance_interval=25,
              zombie_timeout=600,
              debug=0):
     """Bind a listening server socket on (ip, port) and set up
     authentication plumbing, maintenance settings and traffic counters.

     An empty *ip* binds all interfaces; the server name then falls back
     to a reverse DNS lookup, or to the raw address if that fails.
     """
     self.debug = debug
     self.name = ip
     self.port = port
     self.user_manager = user_manager
     self.realm = realm
     self.authentication = authentication
     # Maintenance runs every N channels; idle channels are reaped after
     # zombie_timeout seconds.
     self.maintenance_interval = maintenance_interval
     self.zombie_timeout = zombie_timeout
     asyncore.dispatcher.__init__(self)
     self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
     # Note the double-reference for backwards compatibility.
     self.request_handlers = self.handlers = []
     self.response_handlers = []
     self.set_reuse_addr()
     self.bind((ip, port))
     self.listen(1024)
     host, port = self.socket.getsockname()
     try:
         if not ip:
             self.log_info('Computing default hostname', msglog.types.WARN)
             ip = socket.gethostbyname(socket.gethostname())
         self.name = socket.gethostbyaddr(ip)[0]
     except socket.error:
         # Reverse lookup is best-effort only; keep the raw address.
         self.name = ip
         self.log_info('Cannot do reverse lookup', msglog.types.WARN)
     # Lifetime traffic statistics, incremented by individual channels.
     self.total_clients = Counter()
     self.total_requests = Counter()
     self.exceptions = Counter()
     self.bytes_out = Counter()
     self.bytes_in = Counter()
     self.log_info('Started')
Exemplo n.º 6
0
class MonitorThread(threading.Thread):
    """Worker thread whose entire lifetime is the monitor's run loop."""
    # Shared counter handing out a unique number per thread instance.
    tm_counter = Counter(0)

    def __init__(self, monitor):
        self.monitor = monitor
        self.tm_number = self.tm_counter.increment()
        threading.Thread.__init__(self, None, None, self._get_name())

    def run(self):
        # Delegate entirely to the monitor.
        self.monitor.run_monitor()

    def _get_name(self):
        klass = self.__class__
        return '<%s.%s %d at %#x>' % (klass.__module__, klass.__name__,
                                      self.tm_number, id(self))

    def __repr__(self):
        return '%s for %s' % (self._get_name(), repr(self.monitor))
Exemplo n.º 7
0
 def __init__(self, monitor, host, port, protocol='http', debug=0):
     """Pool of HTTP(S) channels to one host:port, driven by *monitor*."""
     self.monitor = monitor
     self.host = host
     self.port = port
     self.protocol = protocol
     self.debug = debug
     # Channel bookkeeping: a free list plus keyed active/inactive maps.
     self.free_channels = []
     self.active_channels = Dictionary()
     self.inactive_channels = Dictionary()
     self.parallel_channels = Counter()
     # Transactions waiting for a channel vs. currently on the wire.
     self.pending_transactions = []
     self.inflight_transactions = Dictionary()
     # Guards all of the state containers above.
     self.state_lock = Lock()
     self.debugout('Instantiated', 2)
Exemplo n.º 8
0
 def initialize(self):
     """Set up request counting, the request manager, the standard HTTP
     status-code table, and the default error-page template."""
     self.request_counter = Counter()
     self.request_manager = RequestManager()
     # HTTP/1.1 status code -> reason phrase.
     self.responses = {
         100: "Continue",
         101: "Switching Protocols",
         200: "OK",
         201: "Created",
         202: "Accepted",
         203: "Non-Authoritative Information",
         204: "No Content",
         205: "Reset Content",
         206: "Partial Content",
         300: "Multiple Choices",
         301: "Moved Permanently",
         302: "Moved Temporarily",
         303: "See Other",
         304: "Not Modified",
         305: "Use Proxy",
         400: "Bad Request",
         401: "Unauthorized",
         402: "Payment Required",
         403: "Forbidden",
         404: "Not Found",
         405: "Method Not Allowed",
         406: "Not Acceptable",
         407: "Proxy Authentication Required",
         408: "Request Time-out",
         409: "Conflict",
         410: "Gone",
         411: "Length Required",
         412: "Precondition Failed",
         413: "Request Entity Too Large",
         414: "Request-URI Too Large",
         415: "Unsupported Media Type",
         500: "Internal Server Error",
         501: "Not Implemented",
         502: "Bad Gateway",
         503: "Service Unavailable",
         504: "Gateway Time-out",
         505: "HTTP Version not supported"
     }
     # Template for error pages, filled with %(code)d and %(message)s.
     self.default_error = '\r\n'.join([
         '<head>', '<title>Error response</title>', '</head>', '<body>',
         '<h1>Error response</h1>', '<p>Error code %(code)d.',
         '<p>Message: %(message)s.', '</body>', ''
     ])
     return
Exemplo n.º 9
0
class TransactionManager(Thread):
    """Thread that owns one channel per (host, port, connection-type)
    destination and forwards HTTP requests over them."""
    # Shared counter handing out a unique number per manager instance.
    tm_counter = Counter(0)

    def __init__(self, channelmonitor=None):
        self.tm_number = self.tm_counter.increment()
        self._keep_running = Event()
        # Bug fix: the original had a truncated statement ('self._') here
        # and never created the channel cache that send_request() uses.
        self._channels = {}
        # Bug fix: _set_monitor was defined but never invoked, so
        # self.monitor (read by send_request) was never attached.
        self._set_monitor(channelmonitor)
        super(TransactionManager, self).__init__(None, None, repr(self))

    def _set_monitor(self, channelmonitor=None):
        """Attach *channelmonitor*, creating a default one when omitted."""
        if channelmonitor is None:
            channelmonitor = monitor.ChannelMonitor()
        else:
            assert isinstance(channelmonitor, monitor.ChannelMonitor)
        self.monitor = channelmonitor

    def start(self):
        self._keep_running.set()
        super(TransactionManager, self).start()

    def stop(self):
        self._keep_running.clear()

    def stop_and_wait(self, timeout=None):
        """Stop the run loop and join the thread (optionally bounded)."""
        self.stop()
        return self.join(timeout)

    def send_request(self, request):
        """Send *request* over the channel for its destination, creating
        and caching a new channel on first use."""
        host = request.get_host()
        port = request.get_port()
        connectiontype = request.get_type()
        key = (host, port, connectiontype)
        if key not in self._channels:
            # Bug fix: the original assigned to the local name 'channel',
            # shadowing the channel module and raising UnboundLocalError
            # on the very call that created it.
            newchannel = channel.Channel(self.monitor)
            newchannel.setup_connection(host, port, connectiontype)
            self._channels[key] = newchannel
        return self._channels[key].send_request(request)

    def __repr__(self):
        return '<%s.%s %d at %#x>' % (self.__class__.__module__,
                                      self.__class__.__name__, self.tm_number,
                                      id(self))
Exemplo n.º 10
0
class Channel(AsyncChat, object):
    """Server-side HTTP channel, one instance per accepted connection.

    Parses request headers using the '\r\n\r\n' terminator, queues
    Request objects so responses are written out in arrival order,
    tracks per-channel I/O statistics, and periodically reaps idle
    ("zombie") channels from the shared socket map.
    """
    request_manager = Request.singleton.request_manager
    ac_out_buffer_size = 1 << 16
    current_request = None
    # Class-wide counter; gives every channel a unique number.
    channel_counter = Counter()
    # SO_LINGER with zero timeout so close() never blocks on unsent data.
    linger = struct.pack("ii", 0, 0)

    def __init__(self, server, conn, addr):
        """Adopt accepted socket *conn* from *addr* on behalf of *server*."""
        self.channel_number = Channel.channel_counter.increment()
        self.addr = addr
        self.server = server
        # Leaving out connection and map because we set them below.
        AsyncChat.__init__(self)
        self._map = REDUSA_SOCKET_MAP
        self._in_buffer = ''
        self._current_request = None
        # Sentinel kept at the tail of the queue; refill_buffer relies on
        # requests[0] always existing.
        self._null_request = _NullRequest()
        self._request_queue = [self._null_request]
        self._keep_alive = 1
        self._last_use = int(time.time())
        self.check_maintenance()
        self.set_socket(conn, self._map)
        self.socket.setblocking(0)
        self._is_connected = True
        self.connected = True
        self._setup_counters()
        self.reset()

    def _setup_counters(self):
        """Create one Counter per tracked I/O statistic."""
        self.request_counter = Counter()
        self.response_counter = Counter()
        self.bytes_out = Counter()
        self.bytes_in = Counter()
        self.read_calls = Counter()
        self.readable_calls = Counter()
        self.write_calls = Counter()
        self.writable_calls = Counter()
        self.buffer_refills = Counter()
        self.refill_skips = Counter()

    def get_statistics(self):
        """Return a formatted multi-line report of the channel's counters
        and derived averages.

        NOTE(review): unlike the client-side variant of this method, the
        divisions by readcalls / writecalls / refills are unguarded and
        would raise ZeroDivisionError while those counters are still
        zero — confirm whether that state is reachable here.
        """
        requests = float(self.request_counter.value)
        responses = float(self.response_counter.value)
        bytesin = float(self.bytes_in.value)
        bytesout = float(self.bytes_out.value)
        readcalls = float(self.read_calls.value)
        readablecalls = float(self.readable_calls.value)
        writecalls = float(self.write_calls.value)
        writablecalls = float(self.writable_calls.value)
        refills = float(self.buffer_refills.value)
        refillskips = float(self.refill_skips.value)
        messages = ['Number of requests: %d' % requests]
        messages.append('Number of responses: %d' % responses)
        messages.append('Bytes read: %d' % bytesin)
        messages.append('Bytes written: %d' % bytesout)
        messages.append('Calls to read: %d' % readcalls)
        messages.append('Calls to readable: %d' % readablecalls)
        messages.append('Calls to write: %d' % writecalls)
        messages.append('Calls to writable: %d' % writablecalls)
        messages.append('Calls to refill buffer: %d' % refills)
        messages.append('Skipped calls to refill buffer: %d' % refillskips)
        averages = []
        if requests:
            bytesperrequest = bytesin / requests
            bytesperread = bytesin / readcalls
            readsperrequest = readcalls / requests
            readablesperread = readablecalls / readcalls
            averages.append('Bytes per request: %0.1f' % bytesperrequest)
            averages.append('Reads per request: %0.1f' % readsperrequest)
            averages.append('Readables per read: %0.1f' % readablesperread)
            averages.append('Bytes per read: %0.1f' % bytesperread)
        if responses:
            bytesperresponse = bytesout / responses
            bytesperwrite = bytesout / writecalls
            writesperresponse = writecalls / responses
            refillsperresponse = refills / responses
            bytesperrefill = bytesout / refills
            responsesperrefill = responses / refills
            writablesperwrite = writablecalls / writecalls
            averages.append('Bytes per response: %0.1f' % bytesperresponse)
            averages.append('Writes per response: %0.1f' % writesperresponse)
            averages.append('Writables per write: %0.1f' % writablesperwrite)
            averages.append('Bytes per write: %0.1f' % bytesperwrite)
            averages.append('Refills per response: %0.1f' % refillsperresponse)
            averages.append('Bytes per refill: %0.1f' % bytesperrefill)
            averages.append('Responses per refill: %0.1f' % responsesperrefill)
        formatted = ['Server channel statistics']
        for message in messages:
            label, value = message.split(': ')
            formatted.append('  --%s: %s' % (label.ljust(25), value))
        formatted.append('    Calculated averages')
        for average in averages:
            label, value = average.split(': ')
            formatted.append('      --%s: %s' % (label.ljust(25), value))
        return '\n'.join(formatted)

    def set_socket(self, sock, map=None):
        """Install *sock* and apply the no-linger option."""
        AsyncChat.set_socket(self, sock, map)
        # Ensure that we never block waiting for a socket to close.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                               self.linger)

    def server_name(self):
        """Return the owning server's display name."""
        return self.server.name

    def close_when_done(self):
        # Only clears the flag; refill_buffer pushes the closing None
        # once the request queue drains.
        self._keep_alive = 0

    def reset_terminator(self):
        # Blank line terminates an HTTP header block.
        self.set_terminator('\r\n\r\n')

    def reset(self):
        """Forget the current request and re-arm header parsing."""
        self._current_request = None
        self.reset_terminator()

    def request_handled(self, request):
        # Only wake the server when the finished request is next in line.
        if request is self._request_queue[0]:
            self.server.response_ready(self)

    def writable(self):
        """True when buffered output or the head request has data to send."""
        self.writable_calls.increment()
        return AsyncChat.writable(self) or self._request_queue[0].writable()

    def refill_buffer(self):
        """Move completed responses from the request queue into the
        producer FIFO, scheduling a close when keep-alive is off."""
        responsecount = self.response_counter.value
        requests = self._request_queue
        while requests[0].writable():
            self.response_counter.increment()
            self.producer_fifo.push(requests.pop(0).out)
        if requests[0] is self._null_request and not self._keep_alive:
            self.producer_fifo.push(None)
        if responsecount < self.response_counter.value:
            # Discards final call to make averages more pertinent
            self.buffer_refills.increment()
        else:
            self.refill_skips.increment()
        return AsyncChat.refill_buffer(self)

    def readable(self):
        # Use of accepting requests here keeps from blocking asyncore.
        self.readable_calls.increment()
        return (AsyncChat.readable(self)
                and self.request_manager.accepting_requests())

    def __repr__(self):
        ar = AsyncChat.__repr__(self)[1:-1]
        return '<%s channel#: %s requests:%s>' % (ar, self.channel_number,
                                                  self.request_counter)

    def __str__(self):
        ar = AsyncChat.__repr__(self)[1:-1]
        return ('%s, Channel #%s, requests processed: %s' %
                (ar, self.channel_number, self.request_counter))

    def check_maintenance(self):
        # Every maintenance_interval-th channel triggers a sweep.
        if not self.channel_number % self.server.maintenance_interval:
            self.maintenance()

    def maintenance(self):
        self.kill_zombies()

    def kill_zombies(self):
        """Close channels idle longer than their server's zombie timeout."""
        now = int(time.time())
        for channel in self._map.values():
            if isinstance(channel, Channel):
                if (now - channel._last_use) > channel.server.zombie_timeout:
                    channel.die_if_zombie()

    def die_if_zombie(self):
        # A channel with pending output is still alive; refresh its clock.
        if self.writable():
            self._last_use = int(time.time())
        else:
            self.close()

    def send(self, data):
        """Send *data*, updating channel and server byte counters."""
        self.write_calls.increment()
        bytecount = 0
        if self._is_connected:
            bytecount = AsyncChat.send(self, data)
        self.bytes_out.increment(bytecount)
        self.server.bytes_out.increment(bytecount)
        return bytecount

    def recv(self, buffer_size):
        """Receive up to *buffer_size* bytes, updating byte counters."""
        self.read_calls.increment()
        try:
            result = AsyncChat.recv(self, buffer_size)
        except MemoryError:
            sys.exit("Out of Memory!")
        bytecount = len(result)
        self.bytes_in.increment(bytecount)
        self.server.bytes_in.increment(bytecount)
        return result

    def handle_error(self):
        """Log and close on any error, re-raising only SystemExit."""
        t, v = sys.exc_info()[:2]
        if t is SystemExit:
            # Python 2 re-raise syntax: propagate shutdown requests.
            raise t, v
        msglog.exception(msglog.types.ERR, None, 'Handled')
        self.close()

    def log(self, *args):
        # Silence asyncore's per-event logging.
        pass

    def collect_incoming_data(self, data):
        if self._current_request:
            # we are receiving data (probably POST data) for a request
            self._current_request.collect_incoming_data(data)
        else:
            # we are receiving header (request) data
            self._in_buffer += data

    def found_terminator(self):
        """Dispatch a completed header block (or body terminator) —
        either to the in-flight request or by parsing a new request line
        and queueing a Request object."""
        self._last_use = int(time.time())
        if self._current_request:
            self._current_request.found_terminator()
        else:
            header, self._in_buffer = self._in_buffer, ''
            lines = string.split(header, '\r\n')
            while lines and not lines[0]:
                lines.pop(0)
            if not lines:
                self.close_when_done()
                return
            request = lines.pop(0)
            try:
                command, uri, version = crack_request(request)
            except:
                # Deliberate best-effort: malformed request lines are
                # dropped rather than crashing the channel.
                if self.server.debug:
                    self.log_info("Ignoring malformed HTTP request: " +
                                  request)
                return
            if '%' in request:
                request = unquote(request)
            if command is None:
                self.log_info('Bad HTTP request: %s' % repr(request), 'error')
                return
            header = _join_headers(lines)
            self._current_request = Request(self, request, command, uri,
                                            version, header)
            requests = self._request_queue
            # Insert ahead of the null-request sentinel at the tail.
            requests.insert(len(requests) - 1, self._current_request)
            self.request_counter.increment()
            self.server.total_requests.increment()
            self._current_request.found_terminator()

    def push_with_producer(self, producer):
        self.producer_fifo.push(producer)

    def log_info(self, message, type=msglog.types.INFO):
        """Log *message* with a channel-identifying prefix; debug-level
        messages are suppressed unless the server is in debug mode."""
        if type == msglog.types.DB and not self.server.debug:
            return
        prefix = '%s, Channel %s' % (self.server, self.channel_number)
        msglog.log(prefix, type, message)

    def log_statistics(self):
        self.log_info('\n%s\n' % self.get_statistics(), msglog.types.DB)

    def close(self):
        """Tear down the channel, notifying any in-flight request first."""
        self._is_connected = False
        if self._current_request:
            try:
                self._current_request.handle_close()
            except:
                msglog.exception(prefix='Handled')
        AsyncChat.close(self)
        self.log_info('closed.', msglog.types.DB)

    def add_channel(self, map=None):
        # Force use of the module-wide socket map.
        if map is None:
            map = REDUSA_SOCKET_MAP
        assert map is REDUSA_SOCKET_MAP, 'Hack assumes that the map argument is None...'
        return asyncore.dispatcher.add_channel(self, map)

    def del_channel(self, map=None):
        # Force use of the module-wide socket map.
        if map is None:
            map = REDUSA_SOCKET_MAP
        assert map is REDUSA_SOCKET_MAP, 'Hack assumes that the map argument is None...'
        return asyncore.dispatcher.del_channel(self, map)
Exemplo n.º 11
0
    class __impl(ImmortalThread):
        """Singleton implementation of the Jace transaction manager.

        Owns a channel monitor, a pool of per-station transactions, and
        a run loop that alternates between sending queued requests and
        delivering responses through a one-worker thread pool.
        """
        tm_counter = Counter(0)

        def __init__(self, timeout=2.0):
            """Set up monitor, response queue/pool, and synchronization
            primitives; *timeout* bounds synchronous gets."""
            self.timeout = timeout
            self.stations = {}
            self._monitor = monitor.ChannelMonitor(self.timeout)
            self.tm_number = self.tm_counter.increment()
            self._response_tp = ThreadPool(1, 'Jace Response Pool')
            self._pending_responses = Queue()
            # tid -> (station id, callback) for in-flight transactions.
            self._callbacks = {}
            self._running = False
            # Serializes get_synchronous callers; _cv signals completion.
            self._sync_get_lock = Lock()
            self._last_sync_get = uptime.secs()
            self._cv = Condition()
            ImmortalThread.__init__(self, None, None,
                                    'Jace Transaction Manager')
            return

        def start(self):
            """Start the monitor (if needed), build the shared synchronous
            transaction, and launch the thread."""
            if not self._monitor.is_running():
                self._monitor.start_monitor()
            self._running = True
            self._synchronous_transaction = Transaction(
                self, None, self._bump_cv)
            self._synchronous_transaction.set_timeout(self.timeout)
            ImmortalThread.start(self)
            return

        def stop(self):
            """Stop the monitor and ask the run loop to exit."""
            msglog.log('Jace', INFO, 'Stop Jace Prism Transaction Manger')
            if self._monitor.is_running():
                self._monitor.stop_monitor()
            self._running = False
            return

        def run(self):
            """Main loop: pump sends then responses until stopped,
            logging (not propagating) any per-iteration failure."""
            msglog.log('Jace', INFO, 'Starting Jace Prism Transaction Manger.')
            while self._running:
                try:
                    self.send_loop()
                    self.response_loop()
                except:
                    msglog.log(
                        'Jace', WARN,
                        'Jace Transaction Manager - error sending next.')
                    msglog.exception()
            return

        def transaction_completion_handler(self, transaction):
            """Queue the completed transaction's callback for delivery and
            recycle the transaction back to its station.

            NOTE(review): if self._callbacks has no entry for tid,
            the tuple unpack raises inside the try and 's_id' is left
            unbound for the stations.get(s_id) call below — confirm
            whether that state is reachable.  Re-incrementing tm_number
            here also looks unintentional; verify against callers.
            """
            self.tm_number = self.tm_counter.increment()
            try:
                tid = transaction.tid
                s_id, callback = self._callbacks.get(tid)
                if callback:
                    del self._callbacks[tid]
                    self._pending_responses.put(
                        (callback, transaction.get_response()))
            except:
                msglog.exception()
            # recycle the transaction for reuse within the queue
            self.stations.get(s_id).put_transaction(transaction)
            return

        def add_station(self, station):
            """Register *station* under its own id."""
            s_id = station.get_id()
            self.stations[s_id] = station
            return

        def get_synchronous(self, station, rqst):
            """Perform a blocking GET of rqst.url via the shared
            synchronous transaction, waiting up to self.timeout.

            Returns the transaction response, or an ETimeout instance on
            expiry/failure.  Serialized by _sync_get_lock.
            """
            self._sync_get_lock.acquire()
            try:
                t = self._synchronous_transaction
                hdr = self._get_auth_header(station)
                hdr['Connection'] = 'close'
                t.build_request(rqst.url, None, hdr)
                self._cv.acquire()
                try:
                    # Default result if the wait times out or send fails.
                    response = ETimeout()
                    try:
                        t.send_request()
                        self._cv.wait(self.timeout)
                        self._last_sync_get = uptime.secs()
                        if t.is_expired():
                            t.cancel()
                        else:
                            response = t.get_response()
                    except:
                        t.cancel()
                finally:
                    self._cv.release()
                return response
            finally:
                self._sync_get_lock.release()
            return

        def _bump_cv(self, transaction):
            # transaction isn't used
            self._cv.acquire()
            self._cv.notify()
            self._cv.release()
            return

        def send_loop(self):
            """Drain each station's queue (up to its transaction limit),
            registering callbacks and dispatching the requests."""
            for s_id, station in self.stations.items():
                for i in range(station.transaction_limit):
                    try:
                        t, rqst = station.get_next()
                    except Empty:
                        break
                    hdr = self._get_auth_header(station)
                    hdr['Connection'] = 'close'
                    t.build_request(rqst.url, None, hdr)
                    self._callbacks[t.tid] = (s_id, rqst.callback)
                    t.send_request()
            return

        def response_loop(self):
            """Deliver all queued responses; returns when the queue is
            empty, logging (not propagating) callback errors."""
            while 1:
                try:
                    callback, rsp = self._pending_responses.get(False)
                    callback(rsp)
                except Empty:
                    return
                except:
                    msglog.log('Jace', WARN,
                               'Unexpected error in response_loop')
                    msglog.exception()
            return

        def _get_auth_header(self, station):
            # Station credentials are pre-encoded as base64.
            return {"Authorization": "Basic %s" % station.base64string}
Exemplo n.º 12
0
class Channel(AsyncChat, object):
    """
        Sender instantiates class with target URL and socket map.
        Socket map is dictionary of asynchronous connections, like 
        this client, that are being managed together.
        
        Once instantiated, the HTTP Client will have created and connected 
        a socket to the target host and port.  Response data should then be 
        added to the client.  This is done by calling 'push(data)', or 
        'push_with_producer(producer)'.
        
        Use 'push' when data is of type string, and is not a producer 
        instance.
        
        Use 'push_with_producer' to push data already contained within a 
        producer type object.  Producer type objects have 'more' method 
        which returns partial data, allowing it to be output as it is 
        sent.
    """
    ac_out_buffer_size = 1 << 16
    channel_counter = Counter()

    def __init__(self, map, debug=0):
        """Create an unconnected client channel registered in *map*
        (which doubles as the monitor managing this channel)."""
        AsyncChat.__init__(self)
        self.channel_number = self.channel_counter.increment()
        # The monitor object is also used as the asyncore socket map.
        self.monitor = self._map = map
        self.debug = debug
        self._fileno = None
        self._keep_alive = 1
        # SSL usage is decided later, in setup_connection().
        self._using_ssl = False
        self._ssl_context = None
        # Requests queued locally vs. already handed to the socket.
        self._pending_requests = []
        self._sending_requests = []
        self._current_response = None
        # Connection lifecycle flags.
        self._is_connected = False
        self._connection_initiated = False
        self._accepting_requests = True
        self._is_closed = False
        self._header_buffer = StringIO()
        # Guards connection-state transitions in setup_connection().
        self._constate_lock = Lock()
        self.reset_terminator()
        self._setup_counters()
        self._born_on = self._last_use = time.time()

    def _setup_counters(self):
        """Create one Counter instance attribute per tracked statistic."""
        statnames = ('request_counter', 'response_counter',
                     'bytes_out', 'bytes_in',
                     'read_calls', 'readable_calls',
                     'write_calls', 'writable_calls',
                     'buffer_refills', 'refill_skips')
        for statname in statnames:
            setattr(self, statname, Counter())

    def get_statistics(self):
        """Return a formatted multi-line report of this channel's I/O
        counters plus derived averages, guarding every division against
        zero denominators (CSCtg33093)."""
        requests = float(self.request_counter.value)
        responses = float(self.response_counter.value)
        bytesin = float(self.bytes_in.value)
        bytesout = float(self.bytes_out.value)
        readcalls = float(self.read_calls.value)
        readablecalls = float(self.readable_calls.value)
        writecalls = float(self.write_calls.value)
        writablecalls = float(self.writable_calls.value)
        refills = float(self.buffer_refills.value)
        refillskips = float(self.refill_skips.value)
        # Raw counter lines; the 'Label: value' form is split again below.
        messages = [
            'Number of requests: %d' % requests,
            'Number of responses: %d' % responses,
            'Bytes read: %d' % bytesin,
            'Bytes written: %d' % bytesout,
            'Calls to read: %d' % readcalls,
            'Calls to readable: %d' % readablecalls,
            'Calls to write: %d' % writecalls,
            'Calls to writable: %d' % writablecalls,
            'Calls to refill buffer: %d' % refills,
            'Skipped calls to refill buffer: %d' % refillskips,
        ]
        averages = []
        if requests:
            # Read-side averages; read counters may still be zero.
            if readcalls:
                bytesperread = bytesin / readcalls
                readablesperread = readablecalls / readcalls
            else:
                bytesperread = readablesperread = 0
            averages.append('Bytes per request: %0.1f' % (bytesin / requests))
            averages.append('Reads per request: %0.1f' %
                            (readcalls / requests))
            averages.append('Readables per read: %0.1f' % readablesperread)
            averages.append('Bytes per read: %0.1f' % bytesperread)
        if responses:
            # Write-side averages; write/refill counters may still be zero.
            if writecalls:
                bytesperwrite = bytesout / writecalls
                writablesperwrite = writablecalls / writecalls
            else:
                bytesperwrite = writablesperwrite = 0
            if refills:
                bytesperrefill = bytesout / refills
                responsesperrefill = responses / refills
            else:
                bytesperrefill = responsesperrefill = 0
            averages.append('Bytes per response: %0.1f' %
                            (bytesout / responses))
            averages.append('Writes per response: %0.1f' %
                            (writecalls / responses))
            averages.append('Writables per write: %0.1f' % writablesperwrite)
            averages.append('Bytes per write: %0.1f' % bytesperwrite)
            averages.append('Refills per response: %0.1f' %
                            (refills / responses))
            averages.append('Bytes per refill: %0.1f' % bytesperrefill)
            averages.append('Responses per refill: %0.1f' % responsesperrefill)
        formatted = ['Server channel statistics']
        for message in messages:
            label, value = message.split(': ')
            formatted.append('  --%s: %s' % (label.ljust(25), value))
        formatted.append('    Calculated averages')
        for average in averages:
            label, value = average.split(': ')
            formatted.append('      --%s: %s' % (label.ljust(25), value))
        return '\n'.join(formatted)

    def is_closed(self):
        """Return True once close() has run; the channel is then unusable."""
        return self._is_closed

    def request_count(self):
        # Requests handed to send_request() so far.
        return self.request_counter.value

    def response_count(self):
        # Responses completed so far.
        return self.response_counter.value

    def last_used(self):
        # Timestamp (time.time()) of the most recent send_request().
        return self._last_use

    def created(self):
        # Timestamp (time.time()) of channel construction.
        return self._born_on

    def get_socket(self):
        return self.socket

    def file_descriptor(self):
        # Cached at set_socket() time; None until a socket exists.
        return self._fileno

    def should_monitor_writable(self):
        return self.writable()

    def should_monitor_readable(self):
        return self.readable()

    def reset_channel(self):
        """Discard the in-progress response and re-arm header parsing."""
        self._current_response = None
        self._header_buffer.seek(0)
        self._header_buffer.truncate()
        self.reset_terminator()

    def reset_terminator(self):
        # Blank line terminates an HTTP header block.
        self.set_terminator('\r\n\r\n')

    def accepting_requests(self, value=None):
        """Get (or, with *value*, set) whether new requests are accepted;
        a closed channel never accepts."""
        if value is not None:
            self._accepting_requests = value
        return self._accepting_requests and not self._is_closed

    def send_request(self, request):
        """Queue *request* for transmission, lazily initiating the
        connection from the request's own host/port/type on first use.

        Raises TypeError when the channel has already been closed.
        """
        if self._is_closed:
            raise TypeError('Cannot send request over closed channel.')
        self._pending_requests.append(request)
        if not self._connection_initiated:
            self.setup_connection(request.get_host(), request.get_port(),
                                  request.get_type())
        self._last_use = time.time()
        self.monitor.check_channels()
        self.request_counter.increment()

    #===============================================================================
    #  Several connection-related methods have been overridden
    #  in order to support dynamically distinguishing between
    #  and supporting secure connections, and to prevent the
    #  channel's file descriptor from being added to the monitor
    #  prematurely.  That 'create_socket' automatically added the
    #  socket's FD to the socket map is a bug because select
    #  performed on unconnected sockets register as readable and
    #  writable; reading from such a socket, however, will return
    #  '', causing it to be closed, and writing to it generates
    #  an I/O error, also causing it to be closed.
    #
    #  NOTE that another option is to use the connection flags
    #  in readable / writable decisions, preventing unconnected
    #  channel from being added to socket map.
    #===============================================================================
    def setup_connection(self, host, port, connectiontype):
        """Decide plain vs. SSL from *connectiontype* ('http'/'https'),
        create the socket if needed, and initiate the single allowed
        connection to (host, port).

        Raises TypeError for an unknown connection type, or when an
        already-connected channel is pointed at a different address.
        """
        self._constate_lock.acquire()
        try:
            if connectiontype == 'http':
                self._using_ssl = False
            elif connectiontype == 'https':
                self._using_ssl = True
            else:
                raise TypeError('Unknown connection type', connectiontype)
            if self.socket is None:
                self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            if not self._connection_initiated:
                self.connect((host, port))
                self._connection_initiated = True
            elif not (host, port) == self.addr:
                raise TypeError('Channel can only connect to one address.')
        finally:
            self._constate_lock.release()
        # Register with the monitor only once the connection is under way
        # (see the block comment above about premature FD registration).
        self.monitor.add_channel(self)

    def create_socket(self, family, stype):
        """Create this channel's socket (secure or plain per setup_connection).

        Unlike asyncore's create_socket, the new socket is NOT added to
        the socket map here; the monitor learns about it later.
        """
        assert self.socket is None, 'Socket already created.'
        # BUG FIX: the original stored the builtin ``type`` instead of
        # the ``stype`` parameter in family_and_type.
        self.family_and_type = family, stype
        if self._using_ssl:
            connection = self._create_secure_socket(family, stype)
        else:
            connection = self._create_insecure_socket(family, stype)
        self.set_socket(connection)

    def _create_insecure_socket(self, family, type):
        connection = socket.socket(family, type)
        connection.setblocking(0)
        return connection

    def _create_secure_socket(self, family, stype):
        # Build an SSL connection from a new context.  Note *family* and
        # *stype* are unused here; presumably SSL.Connection creates its
        # own underlying socket -- TODO confirm against the SSL module.
        self._ssl_context = SSL.Context()
        connection = SSL.Connection(self._ssl_context)
        return connection

    def set_socket(self, connection):
        # Adopt *connection* as this channel's socket and cache its FD.
        self.socket = connection
        self._fileno = connection.fileno()

    def connect(self, address):
        # Initiate the connection to *address*.  SSL connects in blocking
        # mode and is switched back to non-blocking afterwards
        # (presumably so the handshake completes -- confirm); plain
        # sockets use connect_ex so an in-progress connect is a status,
        # not an exception.
        assert not self._connection_initiated, 'Cannot connect twice.'
        self.connected = False
        try:
            if self._using_ssl:
                self.socket.setblocking(1)
                try:
                    self.socket.connect(address)
                finally:
                    self.socket.setblocking(0)
                errorvalue = 0
            else:
                errorvalue = self.socket.connect_ex(address)
        except:
            message = 'Failed to connect to address %s.  Exception follows.'
            self.msglog(message % (address, ), msglog.types.WARN)
            self.handle_error()
            raise
        else:
            self.addr = address
            if errorvalue in (0, EISCONN):
                # Connected immediately (or already connected).
                self.connected = True
                self.handle_connect()
            elif errorvalue not in (EINPROGRESS, EALREADY, EWOULDBLOCK):
                # Anything other than an in-progress status is fatal.
                message = 'Failed to connect to address %s.'
                self.msglog(message % (address, ), msglog.types.WARN)
                raise socket.error, (errorvalue, errorcode[errorvalue])

    def refill_buffer(self):
        # Move queued requests into the outgoing producer FIFO before
        # delegating to AsyncChat's refill; counts refills vs. skips for
        # the channel statistics.
        outgoing = len(self.producer_fifo)
        while self._pending_requests:
            request = self._pending_requests.pop(0)
            self._sending_requests.append(request)
            # Bypass AsyncChat's push because it automatically calls initiate
            self.producer_fifo.push(request.get_outgoing_producer())
        if len(self.producer_fifo) > outgoing:
            self.buffer_refills.increment()
        else:
            self.refill_skips.increment()
        return AsyncChat.refill_buffer(self)

    def found_terminator(self):
        # Called when the current terminator is seen.  The first one
        # completes the header block: parse the status line and headers,
        # pair the response with the oldest in-flight request, then let
        # the response object drive subsequent terminators.
        if self._current_response is None:
            self._header_buffer.seek(0)
            responseline = self._header_buffer.readline()
            version, status, reason = crack_responseline(responseline)
            if status == 100:
                # Continue header, meaningless.
                self.reset_channel()
                return
            headerlines = self._header_buffer.readlines()
            headers = HeaderDictionary.from_strings(headerlines)
            request = self._sending_requests.pop(0)
            response = Response(version, status, reason.strip(), headers)
            request.set_response(response)
            self._current_response = response
        self._current_response.found_terminator()
        self._handle_response_update()

    def _handle_response_update(self):
        if self._current_response.is_complete():
            self.reset_channel()
            self.response_counter.increment()
        else:
            self.set_terminator(self._current_response.get_terminator())

    def collect_incoming_data(self, data):
        """Route received bytes to the active response, or buffer headers.

        Empty chunks are rejected with ValueError.
        """
        if not data:
            raise ValueError('Collecting empty data!')
        response = self._current_response
        if response:
            response.collect_incoming_data(data)
        else:
            self._header_buffer.write(data)

    def writable(self):
        # Writable when AsyncChat has buffered output OR requests are
        # still waiting to be serialized (they become output on refill).
        self.writable_calls.increment()
        return AsyncChat.writable(self) or len(self._pending_requests)

    def readable(self):
        # Pure delegation; counted for channel statistics.
        self.readable_calls.increment()
        return AsyncChat.readable(self)

    def handle_connect(self):
        # Record that the TCP connection has been established.
        self._is_connected = True

    def handle_write(self):
        # Pure delegation to AsyncChat.
        AsyncChat.handle_write(self)

    def send(self, data):
        # Delegate to AsyncChat, tracking call and outgoing byte counts.
        self.write_calls.increment()
        result = AsyncChat.send(self, data)
        self.bytes_out.increment(result)
        return result

    def recv(self, buffer_size):
        # Delegate to AsyncChat, tracking call and incoming byte counts.
        # Errors are logged and swallowed, returning '' -- which the
        # async framework treats as a closed connection (see the note
        # above about reads on dead sockets).
        self.read_calls.increment()
        data = ''
        try:
            data = AsyncChat.recv(self, buffer_size)
            self.bytes_in.increment(len(data))
        except:
            msglog.exception(prefix='Handled')
        return data

    def handle_read(self):
        # Pure delegation to AsyncChat.
        AsyncChat.handle_read(self)

    def handle_close(self):
        # Give any in-flight response a chance to finalize before the
        # channel is torn down (presumably for responses delimited by
        # connection close -- confirm).
        if self._current_response:
            self._current_response.handle_close()
            self._handle_response_update()
        AsyncChat.handle_close(self)

    def close(self):
        # Tear the channel down: mark closed, deregister from the
        # monitor, close the socket, and drop (with debug logging) any
        # requests that never completed.
        self._is_closed = True
        self._is_connected = False
        self.monitor.remove_channel(self)
        sock = self.socket
        if sock:
            sock.close()
        self.debug_msglog('closed')
        pending = self._pending_requests[:]
        sending = self._sending_requests[:]
        self._pending_requests = []
        self._sending_requests = []
        self.debug_msglog('%d requests in pending.' % len(pending))
        for request in pending:
            self.debug_msglog('Pending %r' % request)
            if request.has_response():
                # NOTE(review): this logs the same 'Pending' line twice
                # for requests that have responses; possibly meant to
                # log the response itself -- confirm intent.
                self.debug_msglog('Pending %r' % request)
        self.debug_msglog('%d requests in sending.' % len(sending))
        for request in sending:
            self.debug_msglog('Sending %r' % request)
        if self.is_debuglevel(1):
            self.log_statistics()

    def debug_msglog(self, message, level=1):
        # Log *message* as a debug entry when the channel's debug level
        # is at least *level*.
        if self.is_debuglevel(level):
            self.msglog(message, msglog.types.DB)

    def log_statistics(self):
        # Emit the channel's statistics block as a debug log entry.
        self.msglog('\n%s\n' % self.get_statistics(), msglog.types.DB)

    def is_debuglevel(self, level):
        """Return True when *level* is at or below the channel's debug setting."""
        return self.debug >= level

    def msglog(self, message, mtype=msglog.types.INFO):
        # Log through the global msglog module, prefixing entries with
        # this channel's identity.  (The method only shadows the module
        # as an attribute; the module itself remains reachable here.)
        msglog.log('broadway', mtype, '[%s] %s' % (self, message))

    def handle_error(self):
        """Log the active exception and unconditionally close the channel."""
        try:
            notice = 'Handling error.  Closing connection.  Exception follows.'
            self.msglog(notice, msglog.types.ERR)
            msglog.exception(prefix='Handled')
        finally:
            # Close even if logging itself raised.
            self.close()

    #####
    #   Semi-crazy method that is working around a sort-of bug within
    #   asyncore.  When using select-based I/O multiplexing, the POLLHUP
    #   socket state is indicated by the socket becoming readable,
    #   and not by indicating an exceptional event.
    #
    #   When using POLL instead, the flag returned indicates precisely
    #   what the state is because "flags & select.POLLHUP" will be true.
    #
    #   In the former case, when using select-based I/O multiplexing,
    #   select's indication that the the descriptor has become readable
    #   leads to the channel's handle read event method being invoked.
    #   Invoking receive on the socket then returns an empty string,
    #   which is taken by the channel as an indication that the socket
    #   is no longer connected and the channel correctly shuts itself
    #   down.
    #
    #   However, asyncore's current implementation of the poll-based
    #   I/O multiplex event handling invokes the channel's
    #   handle exceptional data event anytime "flags & POLLHUP" is true.
    #   While select-based multiplexing would only call this method when
    #   OOB or urgent data was detected, it can now be called for POLLHUP
    #   events too.
    #
    #   Under most scenarios this is not problematic because poll-based
    #   multiplexing also indicates the descriptor is readable and
    #   so the handle read event is also called and therefore the
    #   channel is properly closed, with only an extraneous invocation
    #   to handle exceptional event being a side-effect.  Under certain
    #   situations, however, the socket is not indicated as being
    #   readable, only that it has had an exceptional data event.  I
    #   believe this occurs when the attempt to connect never succeeds,
    #   but a POLLHUP does.  Previously this led to a busy loop, which
    #   is what this method fixes.
    ###
    def handle_expt(self):
        # Handle an exceptional-event notification.  See the long
        # comment above: with poll-based multiplexing a POLLHUP arrives
        # here, and a channel that never connected may get ONLY this
        # event, so the channel must end up closed either way to avoid
        # the historical busy loop.
        if self._is_closed:
            message = 'Handle exceptional event called on closed channel.'
            self.msglog(message, msglog.types.INFO)
            if self.monitor.has_channel(self):
                message = 'Channel %r (fd %d) still being monitoed.  '
                message += 'Close will be invoked explicitly.'
                message = message % (self, self.file_descriptor())
                self.msglog(message, msglog.types.WARN)
                self.close()
                message = 'Handle exceptional event forced close.'
            else:
                message = 'Handle exception event ignored: channel closed'
        else:
            message = 'Channel %r (fd %d) handling exceptional event.  '
            message = message % (self, self.file_descriptor())
            self.msglog(message, msglog.types.INFO)
            try:
                # Poll this descriptor once (zero timeout) to learn its
                # actual state, then run the matching event handlers.
                readable, writable, exc = select.select([self._fileno],
                                                        [self._fileno],
                                                        [self._fileno], 0)
                flags, handlers = [], []
                if readable:
                    flags.append('readable')
                    handlers.append(self.handle_read_event)
                if writable:
                    flags.append('writable')
                    handlers.append(self.handle_write_event)
                if exc:
                    flags.append('exception')
                message = 'Select indicates: %s' % string.join(flags, ', ')
                self.msglog(message, msglog.types.INFO)
                while handlers and not self._is_closed:
                    handler = handlers.pop(0)
                    try:
                        handler()
                    except:
                        self.handle_error()
            finally:
                # Guarantee closure; leaving the channel open here would
                # re-trigger the event and spin.
                if not self._is_closed:
                    self.close()
                    message = 'Channel with exceptional event still open.  '
                    message += 'Invoked close explicitly to avoid loop.'
                    self.msglog(message, msglog.types.WARN)
            message = 'Exceptional event handled by %s' % repr(self)
        self.msglog(message, msglog.types.INFO)

    def __repr__(self):
        # Build a diagnostic repr: class and channel number, connection
        # state, request/response counters, and peer address.  Failures
        # while gathering details are logged and swallowed so repr never
        # raises.
        status = ['%s #%d' % (self.__class__.__name__, self.channel_number)]
        try:
            information = []
            if self._is_closed:
                information.append('closed')
            elif self._connection_initiated:
                if self._is_connected:
                    connectiondata = 'connected '
                else:
                    connectiondata = 'connecting '
                if self._using_ssl:
                    connectiondata += 'SSL '
                connectiondata += 'socket [%d]' % self.file_descriptor()
                information.append(connectiondata)
            else:
                information.append('no connection')
            information.append('%d requests' % self.request_counter.value)
            information.append('%d responses' % self.response_counter.value)
            if self.addr is not None:
                try:
                    information.append('%s:%d' % self.addr)
                except TypeError:
                    # addr was not a (host, port) tuple; show it raw.
                    information.append(repr(self.addr))
            status.extend(['(%s)' % info for info in information])
        except:
            msglog.exception(prefix='Handled')
        return '<%s at %#x>' % (' '.join(status), id(self))

    def __str__(self):
        # Short identity: class name and channel number.
        return '%s #%d' % (self.__class__.__name__, self.channel_number)
Exemplo n.º 13
0
class ConsoleChannel(AsyncChannel):
    """Interactive console channel.

    Feeds received (URL-quoted) lines to an InteractiveSession and
    streams the session's output back, quoted and line-terminated.
    """
    # Class-wide counter used to number console channels for display.
    counter = Counter()

    def __init__(self, dispatcher, connection):
        super(ConsoleChannel, self).__init__(dispatcher, connection)
        self.number = self.counter.increment()
        self.buffer = ByteBuffer()
        self.setup_console()

    def setup_console(self):
        # Create the interactive session, switch to line-based
        # terminators, start the session and show the banner.
        self.namespace = {'loadtools': self.loadtools}
        self.console = InteractiveSession(self)
        self.set_terminator('\r\n')
        self.console.start()
        self.initprompt()

    def initprompt(self):
        # Compose the greeting banner shown when a console connects.
        banner = [sys.version]
        banner.append("%s" % self.dispatcher)
        banner.append(str(self.dispatcher))
        banner.append(str(self))
        banner.append('<loadtools() will add standard tools>')
        self.console.prompt("\n".join(banner))

    def push(self, data):
        # Outgoing data is wrapped: bytes -> quoted -> line-terminated.
        producer = SimpleProducer(data)
        producer = ByteProducer(producer)
        producer = QuoteProducer(producer)
        producer = LineProducer(producer)
        return self.push_with_producer(producer)

    def loadtools(self):
        # Convenience: populate the console namespace with node helpers.
        from mpx.lib import msglog
        from mpx.lib.node import as_node
        from mpx.lib.node import as_node_url
        self.namespace['msglog'] = msglog
        self.namespace['as_node'] = as_node
        self.namespace['root'] = as_node('/')
        self.namespace['as_node_url'] = as_node_url

    def handle_connect(self):
        # Nothing to do; connection is accepted, not initiated.
        pass

    def handle_expt(self):
        self.debugout('%s handling exceptional event.', self, level=0)
        self.close()

    def handle_error(self):
        self.debugout('%s closing due to exception.', self, level=0)
        msglog.exception(prefix='handled')
        self.close()

    def close(self):
        # Stop the console session before tearing down the channel.
        self.console.stop()
        super(ConsoleChannel, self).close()
        self.debugout('%s closed and removed.', self, level=1)

    def recv(self, buffer_size):
        data = super(ConsoleChannel, self).recv(buffer_size)
        unquoted = urllib.unquote(data)
        self.debugout('%s << %r (%r)', self, data, unquoted, level=2)
        return data

    def send(self, data):
        result = super(ConsoleChannel, self).send(data)
        unquoted = urllib.unquote(data)
        self.debugout('%s >> %r (%r)', self, data, unquoted, level=2)
        return result

    def collect_incoming_data(self, bytes):
        self.buffer.write(bytes)

    def found_terminator(self):
        # A complete line arrived: unquote and hand it to the console.
        # NOTE(review): the command is unquoted twice (once here, once in
        # the handle() call); probably only one unquote is intended --
        # confirm before changing, since behavior is kept as-is.
        quoted = self.buffer.read()
        command = urllib.unquote(quoted)
        self.debugout('%s console.handle(%r)', self, command, level=1)
        self.console.handle(urllib.unquote(command))

    def __str__(self):
        status = [type(self).__name__]
        status.append('#%03d' % self.number)
        return ' '.join(status)

    def __repr__(self):
        # BUG FIX: the original interpolated the *list* itself, yielding
        # reprs like "<['ConsoleChannel #001'] at 0x...>"; join the
        # parts instead (matching the HTTP channel's __repr__ style).
        status = [str(self)]
        return '<%s at %#x>' % (' '.join(status), id(self))

    def debugout(self, message, *args, **kw):
        # Route debug output through the owning dispatcher.
        self.dispatcher.debugout(message, *args, **kw)
Exemplo n.º 14
0
class Transaction(object):
    """Tracks one request/response exchange over a channel.

    The transaction wires itself into the request's state callbacks so
    it learns, in order, of the response, the response's reader, and
    finally completion -- at which point listeners and the managing
    object are notified.
    """
    # Class-wide counter used to number transactions for display.
    transaction_counter = Counter()
    # Default expiry in seconds; instances may override via set_timeout.
    timeout = 300

    def __init__(self, request=None, channel=None, **kw):
        self.transaction_number = self.transaction_counter.increment()
        self.debug = 0
        # Equivalent to the original has_key()-guarded assignment.
        self.sid = kw.get('sid')
        self.manager = None
        self.request = None
        self.channel = None
        if request:
            self.set_request(request)
        if channel:
            self.set_channel(channel)
        self._state_listeners = []
        self._complete = Event()
        self._complete.clear()
        self._close_when_done = False
        self._transaction_initiated = None
        self._transaction_completed = None
        self._transaction_created = time.time()

    def set_manager(self, manager):
        self.manager = manager

    def close_when_done(self):
        # Ask the server to close after this exchange and remember to
        # close our channel once the response completes.
        self._close_when_done = True
        self.request.add_header('Connection', 'close')

    def add_state_listener(self, callback):
        # Register *callback* to run on completion; it fires immediately
        # if the transaction already completed.
        self._state_listeners.append(callback)
        if self.is_complete():
            self._run_callbacks()

    def notify_complete(self):
        # Mark complete, optionally close the channel, then inform
        # listeners and the manager.
        self._complete.set()
        if self.debug > 3:
            message = 'Transaction complete:'
            message += '\n\tTransaction: %r' % self
            message += '\n\tRequest: %r' % self.request
            message += '\n\tResponse: %r' % self.get_response()
            message += '\n\tOver channel: %r\n' % self.channel
            msglog.log('broadway', msglog.types.DB, message)
        if self._close_when_done:
            if self.debug > 2:
                message = 'Transaction closing channel:'
                message += '\n\tTransaction: %r' % self
                message += '\n\tChannel: %r' % self.channel
                msglog.log('broadway', msglog.types.DB, message)
            elif self.debug:
                message = '%s closing %s' % (self, self.channel)
                msglog.log('broadway', msglog.types.DB, message)
            self.channel.close()
        self._run_callbacks()
        if self.manager:
            self.manager.handle_completed_transaction(self)

    def handle_timeout(self):
        # Propagate a timeout to the manager, if one is attached.
        if self.manager:
            message = '%s notifying manager of timeout' % self
            msglog.log('broadway', msglog.types.INFO, message)
            self.manager.handle_failed_transaction(self)
        else:
            message = 'Transaction handling timeout has no manager: %r'
            msglog.log('broadway', msglog.types.WARN, message % self)

    def handle_error(self):
        # Log the failure and, if still incomplete, tell the manager.
        warning = msglog.types.WARN
        msglog.log('broadway', warning,
                   'Transaction handling error: %r' % self)
        if not self.is_complete() and self.manager:
            msglog.log('broadway', warning, 'Incomplete, will notify manager')
            self.manager.handle_failed_transaction(self)

    def _run_callbacks(self):
        # Drain and invoke listeners; exceptions are logged and ignored
        # so one bad listener cannot starve the rest.
        while self._state_listeners:
            listener = self._state_listeners.pop(0)
            try:
                listener(self)
            except:
                msglog.exception()
        assert len(self._state_listeners) == 0

    def await_completion(self, timeout=None):
        # Block up to *timeout* seconds; return the completion state.
        self._complete.wait(timeout)
        return self.is_complete()

    def is_complete(self):
        return self._complete.isSet()

    def set_request(self, request):
        self.request = request
        if request is not None:
            self.request.add_state_listener(self.notify_response_ready)

    def set_response(self, response):
        self.response = response
        self.response.add_state_listener(self.notify_reader_ready)

    def set_reader(self, reader):
        self.reader = reader
        self.reader.add_state_listener(self.notify_response_complete)

    def set_channel(self, channel):
        # Adopt the channel and inherit its debug level.
        self.channel = channel
        self.debug = channel.debug

    def set_timeout(self, timeout):
        self.timeout = timeout

    def initiate(self):
        # Hand the request to the channel, timestamping the send.  When
        # a close was requested, stop the channel taking new requests.
        if self.debug > 3:
            message = 'Transaction initiating %s:' % time.ctime()
            message += '\n\tTransaction: %r' % self
            message += '\n\tRequest: %r' % self.request
            message += '\n\tOver channel: %r\n' % self.channel
            msglog.log('broadway', msglog.types.DB, message)
        self._transaction_initiated = time.time()
        self.channel.send_request(self.request)
        if self.debug > 3:
            message = 'Transaction added request to channel %s:' % time.ctime()
            message += '\n\tTransaction: %r' % self
            message += '\n\tRequest: %r' % self.request
            message += '\n\tOver channel: %r\n' % self.channel
            msglog.log('broadway', msglog.types.DB, message)
        if self._close_when_done:
            self.channel.accepting_requests(False)

    def request_sent(self):
        return self._transaction_initiated is not None

    def request_age(self):
        # Seconds since the request was sent; requires initiation.
        assert self.request_sent()
        return time.time() - self._transaction_initiated

    def transaction_age(self):
        # Seconds since this transaction object was created.
        return time.time() - self._transaction_created

    def has_response(self):
        return self.request.has_response()

    def get_response(self):
        return self.request.get_response()

    def notify_response_ready(self, request):
        # Request-state callback: the response object now exists.
        assert request is self.request
        self.set_response(request.get_response())

    def notify_reader_ready(self, response):
        # Response-state callback: the body reader now exists.
        assert response is self.response
        self.set_reader(response.get_reader())

    def notify_response_complete(self, reader):
        # Reader-state callback: the exchange is finished.
        assert reader is self.reader
        self._transaction_completed = time.time()
        self.notify_complete()

    def is_expired(self):
        # Incomplete and older than the timeout (None disables expiry).
        if self.is_complete():
            return False
        if self.timeout is None:
            return False
        return self.transaction_age() > self.timeout

    def succeeded(self):
        # True once complete with a good response; raises on expiry.
        if self.is_complete():
            return self.response.handled_properly()
        elif self.is_expired():
            detail = 'Older than %d seconds' % self.timeout
            raise ETransactionTimeout(self, detail)
        return False

    def cancel(self):
        # Abort the exchange, informing the manager and closing the
        # channel regardless of errors.
        if not self.is_complete() and self.manager:
            # BUG FIX: the original called handle_failed_transaction()
            # with no argument; every other call site passes self.
            self.manager.handle_failed_transaction(self)
        msglog.log('broadway', msglog.types.WARN, '%r closing channel' % self)
        try:
            self.channel.close()
        except:
            msglog.exception(prefix='Handled')

    def stats(self):
        # One-line summary for statistics output.  Guarded on completion
        # as well as initiation: in the sent-but-unfinished window there
        # is no flight time yet and the original would raise here.
        status = ['Transaction #%d' % self.transaction_number]
        if self.request_sent() and self.is_complete():
            status.append('(HTTP %s)' % self.get_response().get_status())
            status.append('(%f sec flight)' % self.get_flighttime())
        else:
            status.append('(pending)')
        return '<%s>' % ' '.join(status)

    def get_flighttime(self):
        # Send-to-complete duration; implicitly None before initiation.
        if self.request_sent():
            return self._transaction_completed - self._transaction_initiated

    def __repr__(self):
        # Verbose diagnostic repr with timestamps and outcome.
        information = []
        if self.request_sent():
            tinitiated = time.ctime(self._transaction_initiated)
            information.append('request: %s' % tinitiated)
            if self.is_complete():
                tcompleted = time.ctime(self._transaction_completed)
                information.append('response: %s' % tcompleted)
                respstatus = self.get_response().get_status()
                if self.succeeded():
                    information.append('success: %s' % respstatus)
                else:
                    information.append('failure: %s' % respstatus)
                flighttime = (self._transaction_completed -
                              self._transaction_initiated)
                information.append('flight time: %f sec' % flighttime)
            else:
                if self.is_expired():
                    information.append('response expired')
                else:
                    information.append('response pending')
        else:
            information.append('not initiated')
        information = ['(%s)' % detail for detail in information]
        classname = self.__class__.__name__
        transactionnumber = self.transaction_number
        information.insert(0, '%s #%d' % (classname, transactionnumber))
        return '<%s>' % (' '.join(information))
Exemplo n.º 15
0
 def _setup_counters(self):
     # Build the per-subscription export statistics counters.
     self.export_successes = Counter()
     self.export_timeouts = Counter()
     self.export_errors = Counter()
     self.exports_started = Counter()
     self.exports_processed = Counter()
     self.exports_deferred = Counter()
     self.export_transactions = Counter()
     self.exports_scheduled = Counter()
     self.export_exceptions = Counter()
     # Export scheduled following skip.
     self.exports_skipped = Counter()
     # Scheduled export called late.
     self.exports_missed = Counter()
Exemplo n.º 16
0
class PushedSubscription(object):
    subscription_counter = Counter()

    def __init__(self, monitor, target, nodetable, period, retries, sid=None):
        # A periodic exporter: polls the nodes in *nodetable* every
        # *period* seconds and pushes the formatted values to *target*.
        self.subscription_number = self.subscription_counter.increment()
        self._setup_collaborators(monitor)
        self.node_table = nodetable
        self.target = target
        self.period = period
        self.retries = retries
        self.sid = sid
        self.running = Flag()
        self._setup = Flag()
        self._setup_counters()
        self._setup_timers()
        self._setup_trackers()
        super(PushedSubscription, self).__init__()

    def id(self):
        # Return the manager-assigned subscription id (sid).
        return self.sid

    def is_running(self):
        # True between start() and stop().
        return self.running.isSet()

    def is_setup(self):
        # True once the manager-side subscription exists.
        return self._setup.isSet()

    def get_target(self):
        # Return the destination that exported data is pushed to.
        return self.target

    def start(self, skip=0):
        # Begin exporting.  *skip* delays the first export by that many
        # periods; the skip counter is pre-decremented here because
        # schedule_next_export will increment it by the same amount.
        assert not self.is_running()
        self._reset_timers()
        self._reset_counters()
        self._reset_trackers()
        if not self.is_setup():
            self.setup_subscription()
        self.running.set()
        self.run_timer.start()
        self.exports_skipped.decrement(skip)
        self.schedule_next_export(skip)
        return self.sid

    def stop(self):
        # Halt exporting and destroy the manager-side subscription.
        # NOTE(review): unlike teardown_subscription, the _setup flag is
        # not cleared here -- confirm whether that is intended.
        assert self.is_running()
        self.running.clear()
        self.run_timer.stop()
        try:
            self.manager.destroy(self.sid)
        except:
            msglog.exception(prefix="Handled")

    def reset_subscription(self):
        # Force the next poll to export every value, not just changes.
        self._export_all_values = True

    def setup_subscription(self):
        # Create the polled subscription with the manager, reusing any
        # existing sid.  The _setup flag is only set on success; the
        # trailing assert makes a creation failure fatal.
        assert not self.is_setup()
        try:
            self.sid = self.manager.create_polled(self.node_table, None,
                                                  self.sid)
        except:
            msglog.exception()
        else:
            self._setup.set()
        assert self.is_setup()
        return self.sid

    def teardown_subscription(self):
        # Destroy the manager-side subscription; the _setup flag is only
        # cleared when destroy succeeds, and the trailing assert makes a
        # destroy failure fatal.
        assert self.is_setup()
        try:
            self.manager.destroy(self.sid)
        except:
            msglog.exception(prefix="Handled")
        else:
            self._setup.clear()
        assert not self.is_setup()
        return self.sid

    def next_poll_time(self, skip=0):
        """Return the next period-aligned export timestamp.

        The current time is truncated to a period boundary, then pushed
        forward one period plus *skip* additional periods.
        """
        period = self.period
        lead = period + (skip * period)
        boundary = (int(time.time()) / period) * period
        return boundary + lead

    def exports_possible(self):
        """Return how many exports could have run between the first and
        latest scheduled export times, given the configured period."""
        elapsed = self.export_time - self.first_export_time
        return elapsed / self.period

    def prepare_to_export(self):
        # Pre-export bookkeeping: account for missed runs, then verify
        # the previous transaction is resolved -- raising the matching
        # ETransaction* error when it is not.  On success the export
        # timer is restarted for this run.
        exportstart = time.time()
        self.exports_started.increment()
        self.check_missed_exports(exportstart)
        transaction = self.active_transaction
        if transaction:
            if transaction.is_complete():
                if transaction.succeeded():
                    self.handle_export_success(transaction)
                else:
                    raise ETransactionException(transaction)
            elif transaction.is_expired():
                raise ETransactionTimeout(transaction)
            else:
                raise ETransactionInProgress(transaction)
        self._reset_timers()
        self.export_timer.start(exportstart)

    def get_export_data(self):
        # Poll the subscription for export data: all values after a
        # reset, changed (COV) values otherwise.  Poll duration is timed
        # and the result size is debug-logged.
        messages = []
        self.poll_timer.start()
        if self._export_all_values:
            dblevel = 2
            messages.append('Polled all values')
            data = self.manager.poll_all(self.sid)
            self._export_all_values = False
        else:
            dblevel = 3
            messages.append('Polled COV values')
            data = self.manager.poll_changed(self.sid)
        self.poll_timer.stop()
        if self.debuglevel(dblevel):
            messages.append('%d values being returned' % len(data))
            self.debugout('%s\n' % '\n\t'.join(messages))
        return data

    def format_export_data(self, data):
        # Run *data* through the configured formatter, timing the step.
        self.debugout('Formatting data.', 2)
        self.format_timer.start()
        data = self.formatter.format(data)
        self.format_timer.stop()
        self.debugout('Data formatted.', 2)
        return data

    def start_export_transaction(self, data):
        """Hand *data* to the transporter and track the new transaction.

        If the transporter declines to create a transaction, nothing
        further happens.  Otherwise the transaction gets a timeout of
        three periods and a completion listener, and is counted as
        processed.
        """
        self.debugout('Creating export transaction', 2)

        self.active_transaction = self.transporter.transport(self.target,
                                                             data,
                                                             sid=self.sid)
        # Idiom fix: identity comparison with None instead of ``== None``.
        if self.active_transaction is None:
            return
        self.transaction_start_timer.start()
        self.transaction_start_timer.stop()
        self.transaction_life_timer.start()
        self.active_transaction.set_timeout(self.period * 3)
        self.active_transaction.add_state_listener(self.notify_complete)
        if self.debuglevel(2):
            self.debugout('Export transaction created', 2)
            if self.debuglevel(4):
                self.debugout('Export transaction data: \n%r\n\n' % data)
        self.exports_processed.increment()

    def schedule_next_export(self, skip=0):
        # Compute and register the next export time with the monitor,
        # updating skip/schedule statistics.  No-op once stopped.
        if not self.is_running():
            message = 'Not rescheduling because subscription stopped.'
            self.debugout(message, 1)
            return
        nextexport = self.next_poll_time(skip)
        if self.export_time is None:
            # First schedule for this run; remember it for statistics.
            self.first_export_time = nextexport
        self.export_time = nextexport
        self.monitor.schedule_subscription(self, nextexport)
        self.exports_skipped.increment(skip)
        self.exports_scheduled.increment()
        return nextexport

    def handle_export_success(self, transaction):
        # Clear the finished transaction and count the success.
        self.active_transaction = None
        self.export_successes.increment()

    def handle_export_timeout(self, error):
        """
            Transaction still pending and time exceeded export
            period, which is also transport timeout value.
            No data has been sent to the server.
        """
        try:
            transaction = error.transaction
            errors = self.export_timeouts.pre_increment()
            message = 'Handling timed out export: %s' % transaction
            self.msglog(message, msglog.types.WARN)
            try:
                transaction.handle_timeout()
            except:
                msglog.exception()
            self.record_failed_export(transaction, errors)
        finally:
            # Always clear state and reschedule, skipping one period.
            self.active_transaction = None
            self.schedule_next_export(1)

    def handle_export_pending(self, error):
        # The previous export has not finished yet: count the deferral
        # and reschedule without skipping a period.
        self.exports_deferred.increment()
        self.debugout('Previous export still pending.', 1)
        self.schedule_next_export(0)

    def handle_export_failure(self, error):
        """
            Transaction completed but returned error response
            code.  Data was sent to the server and a response
            was sent back, but the response code indicates that
            the server failed to handle the request properly.
        """
        try:
            transaction = error.transaction
            failures = self.export_errors.pre_increment()
            if self.debuglevel(2):
                message = 'Handling errored out export: %r' % transaction
                self.msglog(message, msglog.types.WARN)
            # BUG FIX: recording the failure was nested inside the debug
            # check, so failures went unrecorded unless debugging was
            # enabled; handle_export_timeout records unconditionally.
            self.record_failed_export(transaction, failures)
        finally:
            # Always clear state and reschedule, skipping one period.
            self.active_transaction = None
            self.schedule_next_export(1)

    def handle_export_exception(self, error, stage=None):
        """
        An uncaught exception was raised during the export process,
        meaning one of the export methods failed.  The state may be
        anything from uninitialized to request-sent-and-response-read.

        Known transaction errors are dispatched to their specific
        handlers; anything else resets the subscription and reschedules
        the next export after skipping one period.
        """
        if not self.is_running():
            message = 'Ignoring exception because subscription stopped.'
            self.debugout(message, 1)
            return
        if isinstance(error, ETransactionInProgress):
            self.handle_export_pending(error)
        elif isinstance(error, ETransactionTimeout):
            self.handle_export_timeout(error)
        elif isinstance(error, ETransactionException):
            self.handle_export_failure(error)
        else:
            # Bind the log type before the try so the finally clause can
            # use it even if the very first statement in the try fails.
            warning = msglog.types.WARN
            try:
                # Fixed typo: message previously read 'uknown'.
                self.msglog('Handling unknown exception', warning)
                msglog.exception(prefix='handling')
                self.reset_subscription()
                self.msglog('Subscription reset', msglog.types.INFO)
                self.export_timer.stop()
                self.msglog('Export timer stopped', msglog.types.INFO)
                transaction = self.active_transaction
                if transaction:
                    try:
                        transaction.handle_error()
                    except:
                        self.msglog('Notify transaction failed.', warning)
                        msglog.exception(prefix='Handled')
                    else:
                        self.msglog('Transaction notified of failure',
                                    msglog.types.INFO)
            finally:
                self.active_transaction = None
                self.schedule_next_export(1)
                self.msglog('One export will be skipped', warning)

    def check_missed_exports(self, actual):
        """
        Compare the actual export time against the scheduled one and
        return the number of whole periods that were missed (0 when the
        export ran on time).  Misses are logged and tallied.
        """
        scheduled = self.export_time
        lag = actual - scheduled
        missed = int(lag / self.period)
        if missed > 0:
            report = [
                'Missed exports detected',
                'Scheduled export: %s' % time.ctime(scheduled),
                'Actual export: %s' % time.ctime(actual),
                'Configured period: %s' % self.period,
                'Delta between exports: %s' % lag,
                'Periods missed: %s' % missed,
            ]
            self.msglog('%s\n' % '\n\t'.join(report), msglog.types.WARN)
            self.exports_missed.increment(missed)
        return missed

    def record_successful_export(self, transaction, successes):
        """
        Emit a debug summary for a completed export: a terse one-liner
        by default, a multi-line statistics block at debug level >= 2.

        transaction -- the completed export transaction.
        successes -- running success count, used to derive the
            effective (average) export period.
        """
        verbose = self.debuglevel(2)
        parts = ['%s:' % self.toString()]
        runtime = self.export_time - self.first_export_time
        average = runtime / successes
        if verbose:
            parts.append('Exports: %d, runtime: %0.1f sec' %
                         (successes, runtime))
            period_format = 'Period: %0.2f sec, effective: ~ %0.2f sec'
        else:
            period_format = '(%0.0f => ~%0.2f)'
        parts.append(period_format % (self.period, average))
        if verbose:
            parts.append(transaction.stats()[1:-1])
            parts.append(', '.join(self._timer_strings()))
            parts = ['\t\t- %s' % part for part in parts]
            parts.insert(0, 'Statistics of completed export:')
            summary = '%s\n' % '\n'.join(parts)
        else:
            lapses = [(timer.get_name().lower(), timer.get_lapse())
                      for timer in self._get_timers()
                      if (timer.get_start() and timer.get_stop())]
            lapses.append(('flight', transaction.get_flighttime()))
            parts.extend(['(%s %0.2f)' % lapse for lapse in lapses])
            summary = ' '.join(parts)
        self.debugout(summary, 1)

    def record_failed_export(self, transaction, failures, outputdata=False):
        """
        Log the details of a failed export transaction.

        transaction -- the failed transaction; its request (and its
            response, when the transaction completed) are logged.
        failures -- running failure count (informational; not logged
            directly here).
        outputdata -- when true, dump the raw request data even if the
            debug level would not normally show it.
        """
        # Removed dead local: 'warning = msglog.types.WARN' was assigned
        # but never used anywhere in this method.
        information = msglog.types.INFO
        self.msglog('Transaction failed: %r' % transaction, information)
        self.msglog('Failed request: %r' % transaction.request, information)
        if outputdata or self.debuglevel(2):
            message = 'Failed request data: \n%r\n'
            self.debugout(message % transaction.request.data)
        if transaction.is_complete():
            response = transaction.get_response()
            self.msglog('Failed response: %r.' % response, information)
            message = 'Failed response data: \n%r\n'
            self.debugout(message % response.read(), 0)

    def notify_complete(self, transaction):
        """
        Transaction-completion callback: stop the life and export
        timers and enqueue recording of the outcome on the monitor's
        work queue.  Completions for a transaction that is no longer
        current are only logged.
        """
        if transaction is not self.active_transaction:
            report = ['Completed transaction is not current',
                      'Current: %r' % self.active_transaction,
                      'Completed: %r' % transaction]
            self.msglog('\n\t- '.join(report), msglog.types.WARN)
            return
        self.transaction_life_timer.stop()
        self.export_timer.stop()
        if transaction.succeeded():
            count = self.export_successes.value + 1
            if self.debuglevel(1):
                self.monitor.enqueue_work(self.record_successful_export,
                                          transaction, count)
        else:
            count = self.export_errors.value + 1
            self.monitor.enqueue_work(self.record_failed_export,
                                      transaction, count)

    def _setup_collaborators(self, monitor):
        self.monitor = monitor
        self.manager = monitor.get_subscription_manager()
        self.formatter = monitor.get_formatter()
        self.transporter = monitor.get_transporter()

    def _setup_counters(self):
        """Create one statistics Counter per tracked export event."""
        counter_names = (
            'export_successes',
            'export_timeouts',
            'export_errors',
            'exports_started',
            'exports_processed',
            'exports_deferred',
            'export_transactions',
            'exports_scheduled',
            'export_exceptions',
            'exports_skipped',   # export scheduled following a skip
            'exports_missed',    # scheduled export invoked late
        )
        for counter_name in counter_names:
            setattr(self, counter_name, Counter())

    def _reset_counters(self):
        self.export_successes.reset()
        self.export_timeouts.reset()
        self.export_errors.reset()
        self.exports_started.reset()
        self.exports_processed.reset()
        self.exports_deferred.reset()
        self.export_transactions.reset()
        self.exports_scheduled.reset()
        self.export_exceptions.reset()
        self.exports_skipped.reset()
        self.exports_missed.reset()

    def _setup_timers(self):
        """Create the timers that instrument each stage of the export
        cycle, plus the overall run timer."""
        timer_labels = (('run_timer', 'Runtime'),
                        ('poll_timer', 'Poll'),
                        ('format_timer', 'Format'),
                        ('export_timer', 'Export'),
                        ('transaction_start_timer', 'Trans start'),
                        ('transaction_life_timer', 'Trans life'))
        for attribute, label in timer_labels:
            setattr(self, attribute, Timer(label))

    def _reset_timers(self):
        self.poll_timer.reset()
        self.format_timer.reset()
        self.export_timer.reset()
        self.transaction_start_timer.reset()
        self.transaction_life_timer.reset()

    def _setup_trackers(self):
        self.export_time = None
        self.first_export_time = None
        self.active_transaction = None
        self._export_all_values = False

    def _reset_trackers(self):
        # Resetting the trackers is simply re-running the initial setup.
        self._setup_trackers()

    def _timer_string(self, prefix='\t', sep='\n'):
        timestrs = [
            '%s%s' % (prefix, timestr) for timstr in self._timer_strings()
        ]
        return sep.join(timestrs)

    def _timer_strings(self):
        timers = self._get_timers()
        return map(str, timers)

    def _get_timers(self):
        return [
            self.export_timer, self.poll_timer, self.format_timer,
            self.transaction_start_timer, self.transaction_life_timer
        ]

    def debugout(self, dbmessage, dblevel=1):
        """Log *dbmessage* as a DB-type message when the current debug
        level is at least *dblevel*; otherwise do nothing."""
        if not self.debuglevel(dblevel):
            return
        self.msglog(dbmessage, msglog.types.DB)

    def debuglevel(self, level=1):
        """Return true when the module DEBUG setting is at least *level*."""
        return DEBUG >= level

    def msglog(self, message, mtype=msglog.types.INFO, autoprefix=True):
        """Write *message* to the 'broadway' message log.

        autoprefix -- when true, prefix the message with this
            subscription's str() form for traceability.
        """
        if autoprefix:
            text = '[%s] %s' % (self, message)
        else:
            text = message
        msglog.log('broadway', mtype, text)

    def as_dictionary(self):
        """Serialize this subscription's configuration to a plain dict
        suitable for later reconstruction via from_dictionary()."""
        return {
            'monitor': self.monitor.url,
            'target': self.target,
            'nodes': self.node_table,
            'period': self.period,
            'retries': self.retries,
            'sid': self.sid,
        }

    def from_dictionary(klass, configuration):
        """
        Alternate constructor: rebuild a subscription from the dict
        produced by as_dictionary().

        When no 'monitor' key is present, the monitor URL is derived
        from the configured formatter's parent node.
        """
        # 'in' replaces the deprecated dict.has_key() (removed in
        # Python 3); semantics are identical.
        if 'monitor' not in configuration:
            msglog.log(
                'broadway', msglog.types.WARN,
                'Creating monitor URL from formatter parent to '
                'recreate subscription from %r' % configuration)
            formatter = rootspace.as_node(configuration['formatter'])
            configuration['monitor'] = formatter.parent.url
        monitor = rootspace.as_node(configuration['monitor'])
        target = configuration['target']
        nodes = configuration['nodes']
        period = configuration['period']
        retries = configuration['retries']
        sid = configuration['sid']
        return klass(monitor, target, nodes, period, retries, sid)

    from_dictionary = classmethod(from_dictionary)

    def __repr__(self):
        classname = self.__class__.__name__
        subscriptionnumber = self.subscription_number
        details = ['%s #%04d [%s]' % (classname, subscriptionnumber, self.sid)]
        counts = ['%dES' % self.exports_started.value]
        counts.append('%dEM' % self.exports_missed.value)
        counts.append('%dED' % self.exports_deferred.value)
        counts.append('%dEP' % self.exports_processed.value)
        counts.append('%dGD' % self.export_successes.value)
        counts.append('%dBD' % self.export_errors.value)
        counts.append('%dTO' % self.export_timeouts.value)
        counts.append('%dSK' % self.exports_skipped.value)
        details.append('(%s)' % '/'.join(counts))
        return '<%s>' % ' '.join(details)

    def toString(self):
        """Return a compact status line: subscription number followed by
        a slash-separated counter summary."""
        stats = (('ES', self.exports_started),
                 ('EM', self.exports_missed),
                 ('ED', self.exports_deferred),
                 ('EP', self.exports_processed),
                 ('GD', self.export_successes),
                 ('BD', self.export_errors),
                 ('TO', self.export_timeouts),
                 ('SK', self.exports_skipped))
        counts = '/'.join(['%d%s' % (counter.value, tag)
                           for tag, counter in stats])
        return 'PS #%04d (%s)' % (self.subscription_number, counts)

    def __str__(self):
        classname = self.__class__.__name__
        return '%s #%d %r' % (classname, self.subscription_number, self.sid)