def send(self, message):
    """
    Encode *message* and LPUSH it to the destination queue(s).

    With load balancing enabled, exactly one queue (round-robin) receives
    the message and the iterator advances; otherwise every destination
    queue gets a copy.

    Raises:
        MemoryError: when redis reports an address exhaustion condition.
        IOError: errno 28 when redis cannot persist its RDB snapshots.
        exceptions.PipelineError: for any other redis failure.
    """
    message = utils.encode(message)
    if self.load_balance:
        destination_queue = self.destination_queues[
            self.load_balance_iterator]
        self._push_translating_errors(destination_queue, message)
        # advance the round-robin pointer, wrapping at the end;
        # only advanced after a successful push (as before)
        self.load_balance_iterator += 1
        if self.load_balance_iterator == len(self.destination_queues):
            self.load_balance_iterator = 0
    else:
        for destination_queue in self.destination_queues:
            self._push_translating_errors(destination_queue, message)


def _push_translating_errors(self, destination_queue, message):
    """LPUSH one message, translating fatal redis errors to MemoryError/IOError.

    Previously only the load-balancing branch performed this translation,
    so out-of-memory/out-of-disk conditions in broadcast mode were hidden
    inside a generic PipelineError; both paths now share this logic.
    """
    try:
        self.pipe.lpush(destination_queue, message)
    except Exception as exc:
        if 'Cannot assign requested address' in exc.args[0]:
            raise MemoryError
        elif 'Redis is configured to save RDB snapshots, but is currently not able to persist on disk' in exc.args[0]:
            raise IOError(28, 'No space left on device. Redis can\'t save its snapshots.')
        raise exceptions.PipelineError(exc)
def _send(self, destination_queue, message, reconnect=True):
    """
    Publish *message* to *destination_queue* on the configured exchange.

    Verifies the connection first. If the connection turns out to be
    closed (and only then), reconnects and retries exactly once with
    ``reconnect=False`` so a second failure surfaces as PipelineError.
    Any other error is wrapped into PipelineError immediately.
    """
    self.check_connection()
    retval = False
    try:
        retval = self.channel.basic_publish(exchange=self.exchange,
                                            routing_key=destination_queue,
                                            body=message,
                                            properties=self.properties,
                                            mandatory=True,
                                            )
    except Exception as exc:  # UnroutableError, NackError in 1.0.0
        if reconnect and isinstance(exc, pika.exceptions.ConnectionClosed):
            self.logger.debug('Error sending the message. '
                              'Will re-connect and re-send.',
                              exc_info=True)
            self.connect()
            # retry once; reconnect=False prevents infinite recursion
            self._send(destination_queue, message, reconnect=False)
        else:
            raise exceptions.PipelineError(exc)
    else:
        # a falsy return value means the broker did not confirm delivery;
        # only meaningful when nacks are signalled via return value
        # rather than raised as exceptions
        if not self.publish_raises_nack and not retval:
            raise exceptions.PipelineError(
                'Sent message was not confirmed.')
def send(self, message, path="_default", path_permissive=False):
    """
    Encode and LPUSH *message* to every destination queue registered for *path*.

    With ``path_permissive``, an unknown path is silently ignored; otherwise
    a missing path raises PipelineError (wrapping the KeyError). With load
    balancing, only one queue (round-robin) receives the message.

    Raises:
        MemoryError: on redis address exhaustion or maxmemory/OOM errors.
        IOError: errno 28 when redis cannot persist its RDB snapshots.
        exceptions.PipelineError: for any other failure.
    """
    if path not in self.destination_queues and path_permissive:
        return

    message = utils.encode(message)
    try:
        queues = self.destination_queues[path]
    except KeyError as exc:
        raise exceptions.PipelineError(exc)
    if self.load_balance:
        queues = [queues[self.load_balance_iterator]]
        # advance the round-robin pointer, wrapping at the end
        self.load_balance_iterator += 1
        if self.load_balance_iterator == len(
                self.destination_queues[path]):
            self.load_balance_iterator = 0

    for destination_queue in queues:
        try:
            self.pipe.lpush(destination_queue, message)
        except Exception as exc:
            # Fatal conditions are detected by inspecting the error text.
            # NOTE(review): assumes exc.args[0] is a string for every
            # exception the redis client can raise here — confirm.
            if 'Cannot assign requested address' in exc.args[0] or \
               "OOM command not allowed when used memory > 'maxmemory'." in exc.args[0]:
                raise MemoryError(exc.args[0])
            elif 'Redis is configured to save RDB snapshots, but is currently not able to persist on disk' in exc.args[0]:
                raise IOError(28, 'No space left on device or in memory. Redis can\'t save its snapshots. '
                                  'Look at redis\'s logs.')
            raise exceptions.PipelineError(exc)
def _acknowledge(self): try: retval = self.pipe.rpop(self.internal_queue) except Exception as e: raise exceptions.PipelineError(e) else: if not retval: raise exceptions.PipelineError("Could not pop message from internal queue " "for acknowledgement. Return value was %r." "" % retval)
def clear_queue(self, queue):
    """Empty *queue* by deleting its key — a missing key equals an empty list in Redis.

    Raises PipelineError when redis fails or when DEL reports anything
    other than 0 or 1 deleted keys.
    """
    try:
        deleted = self.pipe.delete(queue)
    except Exception as exc:
        raise exceptions.PipelineError(exc)
    if deleted not in (0, 1):
        raise exceptions.PipelineError("Error on redis queue deletion: Return value"
                                       " was not 0 or 1 but %r." % deleted)
def clear_queue(self, queue):
    """Drop *queue* by deleting its key (same as an empty list in Redis).

    Returns whatever DEL reports (number of removed keys); wraps any
    redis failure in PipelineError.
    """
    try:
        removed = self.pipe.delete(queue)
    except Exception as exc:
        raise exceptions.PipelineError(exc)
    return removed
def send(self, message: str, path="_default", path_permissive=False) -> None:
    """
    Deliver *message* to every queue configured for *path*.

    AMQP exchanges could in principle model this fan-out, but that
    architecture is incompatible with the format of our pipeline.conf
    file, so plain queues are used instead.
    """
    if path not in self.destination_queues and path_permissive:
        return

    message = utils.encode(message)
    try:
        queues = self.destination_queues[path]
    except KeyError as exc:
        raise exceptions.PipelineError(exc)

    if self.load_balance:
        # round-robin: pick a single queue, then advance with wrap-around
        queues = [queues[self.load_balance_iterator]]
        self.load_balance_iterator = (
            (self.load_balance_iterator + 1) %
            len(self.destination_queues[path]))

    for queue_name in queues:
        self._send(queue_name, message)
def _send(self, destination_queue, message): retval = False try: retval = self.channel.basic_publish( exchange='', routing_key=destination_queue, body=message, properties=self.properties, mandatory=True, ) except Exception as exc: # UnroutableError, NackError in 1.0.0 raise exceptions.PipelineError(exc) else: if not self.publish_raises_nack and not retval: raise exceptions.PipelineError( 'Sent message was not confirmed.')
def receive(self):
    """Return the next message to process.

    A message still sitting in the internal queue (delivered but not yet
    acknowledged) is handed out again; otherwise this blocks until one
    arrives on the source queue and atomically moves it to the internal
    queue.
    """
    try:
        if self.pipe.llen(self.internal_queue) > 0:
            raw = self.pipe.lindex(self.internal_queue, -1)
        else:
            raw = self.pipe.brpoplpush(self.source_queue,
                                       self.internal_queue, 0)
        return utils.decode(raw)
    except Exception as exc:
        raise exceptions.PipelineError(exc)
def receive(self) -> str:
    """Fetch and decode the next message.

    Refuses to hand out a second message while the previous one has not
    been acknowledged yet.
    """
    if self._has_message:
        raise exceptions.PipelineError("There's already a message, first "
                                       "acknowledge the existing one.")
    raw = self._receive()
    self._has_message = True
    return utils.decode(raw)
def acknowledge(self):
    """Acknowledge the currently held delivery tag at the broker.

    If the broker connection turns out to be closed, reconnect once and
    retry; any other failure is wrapped into PipelineError.
    """
    def _ack():
        # re-reads self.channel, so it picks up a fresh channel after connect()
        self.channel.basic_ack(delivery_tag=self.delivery_tag)

    try:
        _ack()
    except pika.exceptions.ConnectionClosed:
        self.connect()
        _ack()
    except Exception as error:
        raise exceptions.PipelineError(error)
def count_queued_messages(self, *queues) -> dict:
    """Return a mapping from each queue name in *queues* to its length (LLEN)."""
    counts = {}
    for name in queues:
        try:
            counts[name] = self.pipe.llen(name)
        except Exception as exc:
            raise exceptions.PipelineError(exc)
    return counts
def receive(self):
    """Return the pending unacknowledged message if any, else block on the source queue."""
    try:
        raw = self.pipe.lindex(self.internal_queue, -1)  # None when empty
        if not raw:
            # block until a message arrives, moving it to the internal queue
            raw = self.pipe.brpoplpush(self.source_queue,
                                       self.internal_queue, 0)
        return utils.decode(raw)
    except Exception as exc:
        raise exceptions.PipelineError(exc)
def receive(self):
    """
    Return the next message: a still-unacknowledged one from the internal
    queue if present, otherwise block until one arrives on the source
    queue (moved atomically to the internal queue).

    A redis ConnectionError is deliberately swallowed (raised e.g. on
    SIGHUP while blocking), in which case this returns None implicitly.
    """
    try:
        if self.pipe.llen(self.internal_queue) > 0:
            return utils.decode(self.pipe.lindex(self.internal_queue, -1))
        return utils.decode(
            self.pipe.brpoplpush(self.source_queue,
                                 self.internal_queue, 0))
    except redis.exceptions.ConnectionError:
        pass  # raised e.g. on SIGHUP
    except Exception as exc:
        raise exceptions.PipelineError(exc)
def receive(self):
    """Fetch the next message, failing fast when no source queue is configured."""
    if self.source_queue is None:
        raise exceptions.ConfigurationError('pipeline', 'No source queue given.')
    try:
        raw = self.pipe.lindex(self.internal_queue, -1)  # None when empty
        if not raw:
            raw = self.pipe.brpoplpush(self.source_queue,
                                       self.internal_queue, 0)
        return utils.decode(raw)
    except Exception as exc:
        raise exceptions.PipelineError(exc)
def _receive(self) -> bytes: if self.source_queue is None: raise exceptions.ConfigurationError('pipeline', 'No source queue given.') try: method, header, body = next(self.channel.consume(self.source_queue)) if method: self.delivery_tag = method.delivery_tag except Exception as exc: raise exceptions.PipelineError(exc) else: return body
def send(self, message):
    """Encode *message* and LPUSH it to the destination queue(s).

    With load balancing, a single queue (round-robin) receives the
    message and the iterator advances; otherwise every configured queue
    gets a copy. Any redis failure is wrapped into PipelineError.
    """
    message = utils.encode(message)
    if self.load_balance:
        target = self.destination_queues[self.load_balance_iterator]
        try:
            self.pipe.lpush(target, message)
        except Exception as exc:
            raise exceptions.PipelineError(exc)
        # advance the round-robin pointer, wrapping at the end
        self.load_balance_iterator += 1
        if self.load_balance_iterator == len(self.destination_queues):
            self.load_balance_iterator = 0
    else:
        for target in self.destination_queues:
            try:
                self.pipe.lpush(target, message)
            except Exception as exc:
                raise exceptions.PipelineError(exc)
def test_PipelineError(self):
    """PipelineError embeds the repr of a wrapped exception or a plain argument."""
    message = 'some error'
    source = ValueError(message)
    try:
        try:
            raise source
        except ValueError as exc:
            raise excs.PipelineError(exc)
    except excs.PipelineError as exc:
        exception = exc
    # Python 3.7 changed exception reprs (no trailing comma in args)
    if sys.version_info < (3, 7):
        expected = 'pipeline failed - ValueError(%r,)' % message
    else:
        expected = 'pipeline failed - ValueError(%r)' % message
    self.assertEqual(exception.args, (expected, ))

    message = 'some error'
    notanexception = excs.PipelineError(message)
    self.assertEqual(notanexception.args,
                     ('pipeline failed - %r' % message, ))
def _acknowledge(self): try: self.channel.basic_ack(delivery_tag=self.delivery_tag) except pika.exceptions.ConnectionClosed: self.logger.debug('Error sending the message. ' 'Will re-connect and re-send.', exc_info=True) self.connect() self.channel.basic_ack(delivery_tag=self.delivery_tag) except Exception as e: raise exceptions.PipelineError(e) else: self.delivery_tag = None
def _receive(self) -> bytes:
    """
    Receives the last not yet acknowledged message.

    Does not block unlike the other pipelines: an empty source queue
    raises PipelineError (wrapping the IndexError) instead of waiting.

    NOTE(review): the re-delivery branch returns utils.decode(...) while
    the fresh-pop branch returns the queued object unmodified — the
    ``-> bytes`` annotation and this asymmetry only agree if utils.decode
    is idempotent on already-decoded values; confirm.
    """
    # re-deliver the pending, unacknowledged message first
    if len(self.state[self.internal_queue]) > 0:
        return utils.decode(self.state[self.internal_queue][0])
    try:
        first_msg = self.state[self.source_queue].pop(0)
    except IndexError as exc:
        raise exceptions.PipelineError(exc)
    # park the message on the internal queue until it is acknowledged
    self.state[self.internal_queue].append(first_msg)
    return first_msg
def acknowledge(self):
    """
    Acknowledge (delete) the current message from the source queue.

    Raises:
        exceptions.PipelineError: if no message is currently held.

    Returns:
        None
    """
    if not self._has_message:
        raise exceptions.PipelineError("No message to acknowledge.")
    self._acknowledge()
    self._has_message = False
def _receive(self) -> bytes: if self.source_queue is None: raise exceptions.ConfigurationError('pipeline', 'No source queue given.') try: while True: try: retval = self.pipe.lindex(self.internal_queue, -1) # returns None if no value except redis.exceptions.BusyLoadingError: # Just wait at redis' startup #1334 time.sleep(1) else: break if not retval: retval = self.pipe.brpoplpush(self.source_queue, self.internal_queue, 0) except Exception as exc: raise exceptions.PipelineError(exc) else: return retval
def start(self, starting: bool = True, error_on_pipeline: bool = True,
          error_on_message: bool = False, source_pipeline: Optional[str] = None,
          destination_pipeline: Optional[str] = None):
    """
    Run the bot's main loop: (re)connect the pipelines, call process()
    repeatedly, and apply the configured error-handling policy
    (retry delay, dumping failed messages, stop/pass procedures).

    Parameters:
        starting: True on the very first iteration (skips the retry delay).
        error_on_pipeline: start in the "pipeline broken, reconnect" state.
        error_on_message: start in the "last message failed" state.
        source_pipeline: optional override for the source pipeline broker.
        destination_pipeline: optional override for the destination pipeline broker.

    The loop ends only via self.stop() or the break statements in the
    finally block (testing mode).
    """
    self.__source_pipeline = source_pipeline
    self.__destination_pipeline = destination_pipeline

    while True:
        try:
            # delay before retrying after a previous error (not on first start)
            if not starting and (error_on_pipeline or error_on_message):
                self.logger.info('Bot will continue in %s seconds.',
                                 self.parameters.error_retry_delay)
                time.sleep(self.parameters.error_retry_delay)

            if error_on_message:
                error_on_message = False

            if error_on_pipeline:
                try:
                    self.__connect_pipelines()
                except Exception as exc:
                    raise exceptions.PipelineError(exc)
                else:
                    error_on_pipeline = False

            if starting:
                starting = False

            self.__handle_sighup()
            self.process()
            self.__error_retries_counter = 0  # reset counter

            if self.parameters.rate_limit and self.run_mode != 'scheduled':
                self.__sleep()
            if self.collector_empty_process and self.run_mode != 'scheduled':
                self.__sleep(1, log=False)

        except exceptions.PipelineError as exc:
            error_on_pipeline = True

            if self.parameters.error_log_exception:
                self.logger.exception('Pipeline failed.')
            else:
                self.logger.error(utils.error_message_from_exc(exc))
                self.logger.error('Pipeline failed.')
            self.__disconnect_pipelines()

        except Exception as exc:
            # in case of serious system issues, exit immediately
            if isinstance(exc, MemoryError):
                self.logger.exception('Out of memory. Exit immediately. Reason: %r.' % exc.args[0])
                self.stop()
            elif isinstance(exc, (IOError, OSError)) and exc.errno == 28:
                self.logger.exception('Out of disk space. Exit immediately.')
                self.stop()

            # keep the full exc_info so the traceback can be dumped later
            error_on_message = sys.exc_info()

            if self.parameters.error_log_exception:
                self.logger.exception("Bot has found a problem.")
            else:
                self.logger.error(utils.error_message_from_exc(exc))
                self.logger.error("Bot has found a problem.")

            if self.parameters.error_log_message:
                # Print full message if explicitly requested by config
                self.logger.info("Current Message(event): %r.",
                                 self.__current_message)

            # In case of permanent failures, stop now
            if isinstance(exc, exceptions.ConfigurationError):
                self.stop()

        except KeyboardInterrupt:
            self.logger.info("Received KeyboardInterrupt.")
            self.stop(exitcode=0)

        finally:
            # testing mode: run a single iteration, then leave the loop
            if getattr(self.parameters, 'testing', False):
                self.stop(exitcode=0)
                break

            if error_on_message or error_on_pipeline:
                self.__message_counter["failure"] += 1
                self.__error_retries_counter += 1

                # reached the maximum number of retries
                if (self.__error_retries_counter >
                        self.parameters.error_max_retries):
                    if error_on_message:
                        if self.parameters.error_dump_message:
                            error_traceback = traceback.format_exception(*error_on_message)
                            self._dump_message(error_traceback,
                                               message=self.__current_message)
                        else:
                            warnings.warn("Message will be removed from the pipeline and not dumped to the disk. "
                                          "Set `error_dump_message` to true to save the message on disk. "
                                          "This warning is only shown once in the runtime of a bot.")
                        if self.__destination_queues and '_on_error' in self.__destination_queues:
                            self.send_message(self.__current_message, path='_on_error')
                        # remove message from pipeline
                        self.acknowledge_message()
                        # when bot acknowledge the message,
                        # don't need to wait again
                        error_on_message = False

                    # run_mode: scheduled
                    if self.run_mode == 'scheduled':
                        self.logger.info('Shutting down scheduled bot.')
                        self.stop(exitcode=0)

                    # error_procedure: stop
                    elif self.parameters.error_procedure == "stop":
                        self.stop()

                    # error_procedure: pass
                    elif not error_on_pipeline:
                        self.__error_retries_counter = 0  # reset counter

                    # error_procedure: pass and pipeline problem
                    else:
                        # retry forever, see https://github.com/certtools/intelmq/issues/1333
                        # https://lists.cert.at/pipermail/intelmq-users/2018-October/000085.html
                        pass
            else:
                self.__message_counter["success"] += 1

                # no errors, check for run mode: scheduled
                if self.run_mode == 'scheduled':
                    self.logger.info('Shutting down scheduled bot.')
                    self.stop(exitcode=0)

            self.__stats()
            self.__handle_sighup()
def start(self, starting: bool = True, error_on_pipeline: bool = True,
          error_on_message: bool = False, source_pipeline: Optional[str] = None,
          destination_pipeline: Optional[str] = None):
    """
    Run the bot's main loop: (re)connect the pipelines, call process()
    repeatedly, and apply the configured error-handling policy.

    Parameters:
        starting: True on the very first iteration (skips the retry delay).
        error_on_pipeline: start in the "pipeline broken, reconnect" state.
        error_on_message: start in the "last message failed" state.
        source_pipeline: optional override for the source pipeline broker.
        destination_pipeline: optional override for the destination pipeline broker.
    """
    self.__source_pipeline = source_pipeline
    self.__destination_pipeline = destination_pipeline

    while True:
        try:
            # delay before retrying after a previous error (not on first start)
            if not starting and (error_on_pipeline or error_on_message):
                self.logger.info('Bot will continue in %s seconds.',
                                 self.parameters.error_retry_delay)
                time.sleep(self.parameters.error_retry_delay)

            if error_on_message:
                error_on_message = False

            if error_on_pipeline:
                try:
                    self.__connect_pipelines()
                except Exception as exc:
                    raise exceptions.PipelineError(exc)
                else:
                    error_on_pipeline = False

            if starting:
                starting = False

            self.__handle_sighup()
            self.process()
            self.__error_retries_counter = 0  # reset counter

            if self.parameters.rate_limit and self.run_mode != 'scheduled':
                self.__sleep()

        except exceptions.PipelineError as exc:
            error_on_pipeline = True

            if self.parameters.error_log_exception:
                self.logger.exception('Pipeline failed.')
            else:
                self.logger.error(utils.error_message_from_exc(exc))
                self.logger.error('Pipeline failed.')
            self.__disconnect_pipelines()

        except Exception as exc:
            # in case of serious system issues, exit immediately
            if isinstance(exc, MemoryError):
                self.logger.exception(
                    'Out of memory. Exit immediately. Reason: %r.' % exc.args[0])
                self.stop()
            elif isinstance(exc, (IOError, OSError)) and exc.errno == 28:
                self.logger.exception(
                    'Out of disk space. Exit immediately.')
                self.stop()

            # keep the full exc_info so the traceback can be dumped later
            error_on_message = sys.exc_info()

            if self.parameters.error_log_exception:
                self.logger.exception("Bot has found a problem.")
            else:
                self.logger.error(utils.error_message_from_exc(exc))
                self.logger.error("Bot has found a problem.")

            if self.parameters.error_log_message:
                # Dump full message if explicitly requested by config
                self.logger.info("Current Message(event): %r.",
                                 self.__current_message)

            # In case of permanent failures, stop now
            if isinstance(exc, exceptions.ConfigurationError):
                self.stop()

        except KeyboardInterrupt:
            self.logger.info("Received KeyboardInterrupt.")
            self.stop(exitcode=0)
            del self
            break

        finally:
            # testing mode: run a single iteration, then leave the loop
            if getattr(self.parameters, 'testing', False):
                self.stop(exitcode=0)
                break

            if error_on_message or error_on_pipeline:
                self.__error_retries_counter += 1

                # reached the maximum number of retries
                if (self.__error_retries_counter >
                        self.parameters.error_max_retries):
                    if error_on_message:
                        if self.parameters.error_dump_message:
                            error_traceback = traceback.format_exception(
                                *error_on_message)
                            self._dump_message(
                                error_traceback, message=self.__current_message)
                        self.__current_message = None
                        # remove message from pipeline
                        self.acknowledge_message()
                        # when bot acknowledge the message,
                        # don't need to wait again
                        error_on_message = False

                    # run_mode: scheduled
                    if self.run_mode == 'scheduled':
                        self.logger.info('Shutting down scheduled bot.')
                        self.stop(exitcode=0)

                    # error_procedure: stop
                    elif self.parameters.error_procedure == "stop":
                        self.stop()

                    # error_procedure: pass
                    elif not error_on_pipeline:
                        self.__error_retries_counter = 0  # reset counter

                    # error_procedure: pass and pipeline problem
                    else:
                        self.stop()

            # no errors, check for run mode: scheduled
            elif self.run_mode == 'scheduled':
                self.logger.info('Shutting down scheduled bot.')
                self.stop(exitcode=0)

            self.__handle_sighup()
def reject_message(self):
    """
    Reject the currently held message (hand it back to the broker/queue).

    Raises:
        exceptions.PipelineError: if no message is currently held.

    Returns:
        None
    """
    if not self._has_message:
        # message previously said "acknowledge" (copy-paste from
        # acknowledge()); corrected to name the actual operation
        raise exceptions.PipelineError("No message to reject.")
    self._reject_message()
    self._has_message = False
def connect(self):
    """No-op connect; raises PipelineError when the test parameters demand it."""
    if not self.parameters.raise_on_connect:
        return
    raise exceptions.PipelineError('Connect failed as requested')
def acknowledge(self):
    """Drop the oldest entry from the internal queue via RPOP and return it."""
    try:
        popped = self.pipe.rpop(self.internal_queue)
    except Exception as error:
        raise exceptions.PipelineError(error)
    return popped