def __init__(self, max_wait_for_replay, max_queue_size, throttling_delay):
    """Set up limits, counters and an empty transaction queue, then persist an initial status."""
    # Tunables, fixed for the lifetime of the manager.
    self._MAX_WAIT_FOR_REPLAY = max_wait_for_replay
    self._MAX_QUEUE_SIZE = max_queue_size
    self._THROTTLING_DELAY = throttling_delay

    # Test hook: flush synchronously instead of through the ioloop.
    self._flush_without_ioloop = False

    # Queue of all non-committed transactions, with cached aggregate
    # count/size so they never have to be recomputed from the list.
    self._transactions = []
    self._total_count = 0
    self._total_size = 0

    # Counters reported on the status page.
    self._flush_count = 0
    self._transactions_received = 0
    self._transactions_flushed = 0
    self._too_big_count = 0

    # Global counter used to assign an id to each transaction; note it
    # could conceivably wrap around and overlap.
    self._counter = 0

    self._trs_to_flush = None  # transactions currently being flushed (None = no flush in progress)
    self._last_flush = datetime.utcnow()  # timestamp of the last flush, used for throttling

    # Record an initial status message.
    ForwarderStatus().persist()
def flush(self):
    """Collect every transaction whose flush deadline has passed and start flushing them.

    No-op when a flush is already in progress. Always increments the flush
    counter and persists a ForwarderStatus snapshot, even when nothing was due.
    """
    if self._trs_to_flush is not None:
        log.debug("A flush is already in progress, not doing anything")
        return

    to_flush = []
    # Do we have something to do ?
    # BUGFIX: use utcnow(), not now(). The manager works in UTC everywhere
    # else (e.g. _last_flush); comparing transactions' UTC deadlines against
    # local time would flush too early or too late depending on the timezone.
    now = datetime.utcnow()
    for tr in self._transactions:
        if tr.time_to_flush(now):
            to_flush.append(tr)

    count = len(to_flush)
    if count > 0:
        log.debug("Flushing %s transaction%s" % (count, plural(count)))
        self._trs_to_flush = to_flush
        self.flush_next()

    self._flush_count += 1
    ForwarderStatus(
        queue_length=self._total_count,
        queue_size=self._total_size,
        flush_count=self._flush_count,
        transactions_received=self._transactions_received,
        transactions_flushed=self._transactions_flushed).persist()
def flush(self):
    """Kick off a flush of every due transaction, with throttled log verbosity."""
    if self._trs_to_flush is not None:
        log.debug("A flush is already in progress, not doing anything")
        return

    now = datetime.utcnow()
    due = [tr for tr in self._transactions if tr.time_to_flush(now)]
    count = len(due)

    # INFO for the first few flushes, then only periodically; DEBUG otherwise.
    flush_number = self._flush_count + 1
    noisy = (flush_number <= FLUSH_LOGGING_INITIAL
             or flush_number % FLUSH_LOGGING_PERIOD == 0)
    emit = log.info if noisy else log.debug

    if count > 0:
        emit("Flushing %s transaction%s during flush #%s"
             % (count, plural(count), str(flush_number)))

        # Reset per-flush error tracking.
        self._endpoints_errors = {}
        self._finished_flushes = 0

        # Sort LIFO-style, pushing error-prone transactions to the back.
        self._trs_to_flush = sorted(due, key=lambda t: (-t._error_count, t._id))
        self._flush_time = datetime.utcnow()
        self.flush_next()
    else:
        emit("No transaction to flush during flush #%s" % str(flush_number))

    if flush_number == FLUSH_LOGGING_INITIAL:
        log.info(
            "First flushes done, next flushes will be logged every %s flushes."
            % FLUSH_LOGGING_PERIOD)

    self._flush_count += 1
    ForwarderStatus(
        queue_length=self._total_count,
        queue_size=self._total_size,
        flush_count=self._flush_count,
        transactions_received=self._transactions_received,
        transactions_flushed=self._transactions_flushed,
        transactions_rejected=self._transactions_rejected).persist()
def tr_error_too_big(self, tr):
    """Drop a transaction that was rejected for exceeding the size limit.

    The transaction is removed from the queue for good (it will not be
    replayed), the cached queue aggregates are updated, and a status
    snapshot is persisted.
    """
    tr.inc_error_count()
    # FIX: pass lazy %-args (consistent with the other tr_error_* handlers)
    # and use implicit string concatenation instead of a backslash
    # line-continuation, which leaked the continuation line's leading
    # whitespace into the logged message.
    log.warn(
        "Transaction %d is %sKB, it has been rejected as too large. "
        "It will not be replayed.",
        tr.get_id(), tr.get_size() / 1024)

    # Remove it and keep the cached count/size aggregates in sync.
    self._transactions.remove(tr)
    self._total_count -= 1
    self._total_size -= tr.get_size()

    self._transactions_flushed += 1
    self.print_queue_stats()
    self._too_big_count += 1
    ForwarderStatus(
        queue_length=self._total_count,
        queue_size=self._total_size,
        flush_count=self._flush_count,
        transactions_received=self._transactions_received,
        transactions_flushed=self._transactions_flushed,
        too_big_count=self._too_big_count).persist()
def tr_error_reject_request(self, tr, response_code):
    """Drop a transaction the endpoint rejected outright; it is never replayed."""
    # This flush attempt is finished either way.
    self._finished_flushes += 1
    self._running_flushes -= 1

    tr.inc_error_count()
    size_kb = tr.get_size() / 1024
    log.warn(
        "Transaction %d has been rejected (code %d, size %sKB), it will not be replayed",
        tr.get_id(), response_code, size_kb)

    # Rejected transactions are gone for good: drop and account for them.
    self._remove(tr)
    self._transactions_flushed += 1
    self.print_queue_stats()
    self._transactions_rejected += 1

    status = ForwarderStatus(
        queue_length=self._total_count,
        queue_size=self._total_size,
        flush_count=self._flush_count,
        transactions_received=self._transactions_received,
        transactions_flushed=self._transactions_flushed,
        transactions_rejected=self._transactions_rejected)
    status.persist()
def tr_error(self, tr):
    """Handle a failed transaction.

    Drops the transaction once it has failed more than _MAX_ENDPOINT_ERRORS
    times; otherwise schedules a replay. When one endpoint accumulates
    _MAX_ENDPOINT_ERRORS failures within the current flush, its remaining
    transactions are pulled out of the flush so we do not block on it.
    """
    self._finished_flushes += 1
    self._running_flushes -= 1
    tr.inc_error_count()
    errors = tr.get_error_count()

    if errors > self._MAX_ENDPOINT_ERRORS:
        # This transaction has used up its error budget: give up on it.
        log.warn("Transaction %d failed too many (%d) times, removing",
                 tr.get_id(), errors)
        self._remove(tr)
        self._transactions_flushed += 1
        self.print_queue_stats()
        self._transactions_rejected += 1
        ForwarderStatus(
            queue_length=self._total_count,
            queue_size=self._total_size,
            flush_count=self._flush_count,
            transactions_received=self._transactions_received,
            transactions_flushed=self._transactions_flushed,
            transactions_rejected=self._transactions_rejected).persist()
        return

    # Still under the budget: schedule a replay later.
    tr.compute_next_flush(self._MAX_WAIT_FOR_REPLAY)
    log.warn(
        "Transaction %d in error (%s error%s), it will be replayed after %s",
        tr.get_id(), errors, plural(errors), tr.get_next_flush())

    endpoint = tr._endpoint
    self._endpoints_errors[endpoint] = self._endpoints_errors.get(endpoint, 0) + 1

    # The endpoint itself keeps failing — it is probably down, so remove its
    # remaining transactions from the current flush (they are rescheduled)
    # instead of blocking on them.
    if self._endpoints_errors[endpoint] == self._MAX_ENDPOINT_ERRORS:
        still_flushable = []
        for queued in self._trs_to_flush:
            if queued._endpoint == endpoint:
                queued.compute_next_flush(self._MAX_WAIT_FOR_REPLAY)
            else:
                still_flushable.append(queued)
        log.debug(
            'Endpoint %s seems down, removed %s transaction from current flush',
            endpoint, len(self._trs_to_flush) - len(still_flushable))
        self._trs_to_flush = still_flushable
def flush(self):
    """Start flushing every due transaction; log verbosity is throttled."""
    if self._trs_to_flush is not None:
        log.debug("A flush is already in progress, not doing anything")
        return

    now = datetime.utcnow()
    due = [tr for tr in self._transactions if tr.time_to_flush(now)]

    # INFO for the first few flushes, then only every FLUSH_LOGGING_PERIOD.
    flush_number = self._flush_count + 1
    noisy = (flush_number <= FLUSH_LOGGING_INITIAL
             or flush_number % FLUSH_LOGGING_PERIOD == 0)
    emit = log.info if noisy else log.debug

    if due:
        emit("Flushing %s transaction%s during flush #%s"
             % (len(due), plural(len(due)), str(flush_number)))
        self._trs_to_flush = due
        self.flush_next()
    else:
        emit("No transaction to flush during flush #%s" % str(flush_number))

    if flush_number == FLUSH_LOGGING_INITIAL:
        log.info(
            "First flushes done, next flushes will be logged every %s flushes."
            % FLUSH_LOGGING_PERIOD)

    self._flush_count += 1
    ForwarderStatus(
        queue_length=self._total_count,
        queue_size=self._total_size,
        flush_count=self._flush_count,
        transactions_received=self._transactions_received,
        transactions_flushed=self._transactions_flushed).persist()
def __init__(self, max_wait_for_replay, max_queue_size, throttling_delay):
    """Initialize limits, counters and an empty transaction queue."""
    # Configuration limits.
    self._MAX_WAIT_FOR_REPLAY = max_wait_for_replay
    self._MAX_QUEUE_SIZE = max_queue_size
    self._THROTTLING_DELAY = throttling_delay

    self._flush_without_ioloop = False  # test hook: flush without the ioloop

    # Pending transactions plus cached aggregate count/size.
    self._transactions = []
    self._total_count = 0
    self._total_size = 0

    # Status counters.
    self._flush_count = 0
    self._transactions_received = 0
    self._transactions_flushed = 0

    self._counter = 0  # id handed to the next transaction
    self._trs_to_flush = None  # set only while a flush is running
    self._last_flush = datetime.utcnow()  # used for throttling

    ForwarderStatus().persist()
def tr_error_reject_request(self, tr):
    """Drop a transaction rejected for being too large; it is never replayed."""
    # One in-flight flush just ended, whatever the outcome.
    self._finished_flushes += 1
    self._running_flushes -= 1

    tr.inc_error_count()
    log.warn(
        "Transaction %d is %sKB, it has been rejected as too large. "
        "It will not be replayed.",
        tr.get_id(), tr.get_size() / 1024)

    # Remove it from the queue and keep the cached aggregates in sync.
    self._transactions.remove(tr)
    self._total_count -= 1
    self._total_size -= tr.get_size()

    self._transactions_flushed += 1
    self.print_queue_stats()
    self._transactions_rejected += 1

    status = ForwarderStatus(
        queue_length=self._total_count,
        queue_size=self._total_size,
        flush_count=self._flush_count,
        transactions_received=self._transactions_received,
        transactions_flushed=self._transactions_flushed,
        transactions_rejected=self._transactions_rejected)
    status.persist()
def __init__(self, max_wait_for_replay, max_queue_size, throttling_delay,
             max_parallelism=1, max_endpoint_errors=4):
    """Set up limits, queue state, counters and per-endpoint error tracking,
    then persist an initial ForwarderStatus."""
    # Fixed configuration.
    self._MAX_WAIT_FOR_REPLAY = max_wait_for_replay
    self._MAX_QUEUE_SIZE = max_queue_size
    self._THROTTLING_DELAY = throttling_delay
    self._MAX_PARALLELISM = max_parallelism
    self._MAX_ENDPOINT_ERRORS = max_endpoint_errors
    self._MAX_FLUSH_DURATION = timedelta(seconds=10)

    self._flush_without_ioloop = False  # useful for tests

    # Queue of all non-committed transactions; count/size are cached so they
    # never need recomputing by walking the list.
    self._transactions = []
    self._total_count = 0
    self._total_size = 0

    # Flush bookkeeping and status-page counters.
    self._flush_count = 0
    self._running_flushes = 0
    self._transactions_received = 0
    self._transactions_flushed = 0
    self._transactions_rejected = 0

    # Global counter assigning an id to each transaction; we may have an
    # issue if this overlaps.
    self._counter = 0

    self._trs_to_flush = None  # transactions currently being flushed
    self._last_flush = datetime.utcnow()  # last flush timestamp (throttling)

    # Per-endpoint error tracking for the current flush.
    self._endpoints_errors = {}
    self._finished_flushes = 0

    # Track an initial status message.
    ForwarderStatus().persist()