def guard(self):
    """Main PulseGuardian loop.

    Repeatedly fetches queue and binding data from the management API,
    runs queue monitoring and deleted-queue cleanup, then sleeps for the
    current polling interval.  Runs until KeyboardInterrupt.
    """
    logging.info("PulseGuardian started")
    while True:
        logging.info('Guard loop.')
        try:
            queues = pulse_management.queues()
            bindings = pulse_management.bindings()
            logging.info('Got queues')
            # Skip both passes when the broker reports no queues at all.
            if queues:
                logging.info('Monitor queues')
                self.monitor_queues(queues, bindings)
                logging.info('Clear deleted queues')
                self.clear_deleted_queues(queues, bindings)
            # A successful pass after a notified failure resets the
            # "already notified" flags so later failures notify again.
            if (self._connection_error_notified
                    or self._unknown_error_notified):
                self._reset_notification_error_params()
        except (requests.ConnectionError, socket.error):
            # Management API unreachable: notify once and back off.
            self.notify_connection_error()
            self._increase_interval()
        except KeyboardInterrupt:
            break
        except Exception:
            self.notify_unknown_error()
            self._increase_interval()
        logging.info('Sleeping for %d seconds' % self._polling_interval)
        time.sleep(self._polling_interval)
def guard(self):
    """Main PulseGuardian loop (mozdef-logging variant).

    Polls the management API for queues and bindings, monitors and
    cleans them up, then sleeps for the current polling interval.
    Runs until KeyboardInterrupt.
    """
    mozdef.log(
        mozdef.NOTICE,
        mozdef.STARTUP,
        'PulseGuardian started.',
    )
    while True:
        mozdef.log(
            mozdef.DEBUG,
            mozdef.OTHER,
            'Guard loop starting.',
        )
        try:
            queues = pulse_management.queues()
            bindings = pulse_management.bindings()
            mozdef.log(
                mozdef.DEBUG,
                mozdef.OTHER,
                'Fetched queue and binding data.',
            )
            # Both passes are skipped when the broker has no queues.
            if queues:
                mozdef.log(
                    mozdef.DEBUG,
                    mozdef.OTHER,
                    'Monitoring queues.',
                )
                self.monitor_queues(queues, bindings)
                mozdef.log(
                    mozdef.DEBUG,
                    mozdef.OTHER,
                    'Clearing deleted queues.',
                )
                self.clear_deleted_queues(queues, bindings)
            # Reset the "already notified" flags after a clean pass so
            # subsequent failures trigger fresh notifications.
            if (self._connection_error_notified
                    or self._unknown_error_notified):
                self._reset_notification_error_params()
        except (requests.ConnectionError, socket.error):
            self.notify_connection_error()
            self._increase_interval()
        except KeyboardInterrupt:
            break
        except Exception:
            self.notify_unknown_error()
            self._increase_interval()
        mozdef.log(
            mozdef.DEBUG,
            mozdef.OTHER,
            'Sleeping for {} seconds'.format(self._polling_interval),
        )
        time.sleep(self._polling_interval)
def test_delete_skip_unbounded(self):
    """Queues flagged as unbound must survive monitoring even after
    growing past the deletion threshold."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object
    db_session.refresh(self.rabbitmq_account)
    self.assertGreater(len(self.rabbitmq_account.queues), 0)

    # set queues as unbound so they won't be deleted
    # NOTE(review): no commit here — assumes monitor_queues reads these
    # ORM objects through the same session; confirm if flaky.
    for queue in self.rabbitmq_account.queues:
        queue.unbound = 1

    # Queue multiple messages while no consumer exists.
    for i in range(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in range(100):
        time.sleep(0.3)
        queues_to_delete = [
            q_data['name'] for q_data in pulse_management.queues()
            if q_data['messages_ready'] > self.guardian.del_queue_size
        ]
        if queues_to_delete:
            break

    # Test that there are some queues that should be deleted.
    self.assertGreater(len(queues_to_delete), 0)

    # Setting up a callback to capture deleted queues
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Run through the code that decides whether to delete a queue
    # that has grown too large.
    # In this case, it should run the check and decide to not delete
    # any queues.
    self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                 pulse_management.bindings(vhost='/'))

    # Test that none of the queues were deleted: every overgrown queue
    # must still exist on the broker.
    # Fix: the original compared queue *dicts* against a list of queue
    # *names* (`q in queues_to_delete`), which can never match.
    existing = {q_data['name'] for q_data in pulse_management.queues()}
    self.assertTrue(all(name in existing for name in queues_to_delete))

    # And that they were not deleted by guardian...
    # Fix: actually check the captured callback instead of re-testing
    # the (unchanged) length of queues_to_delete.
    self.assertEqual(len(deleted_queues), 0)
def test_delete_skip_unbounded(self):
    """Queues flagged as unbound must survive monitoring even after
    growing past the deletion threshold."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object
    db_session.refresh(self.pulse_user)
    self.assertGreater(len(self.pulse_user.queues), 0)

    # set queues as unbound so they won't be deleted
    # NOTE(review): no commit here — assumes monitor_queues reads these
    # ORM objects through the same session; confirm if flaky.
    for queue in self.pulse_user.queues:
        queue.unbound = 1

    # Queue multiple messages while no consumer exists.
    for i in xrange(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in xrange(10):
        time.sleep(0.3)
        queues_to_delete = [q_data['name']
                            for q_data in pulse_management.queues()
                            if q_data['messages_ready'] >
                            self.guardian.del_queue_size]
        if queues_to_delete:
            break

    # Test that there are some queues that should be deleted.
    self.assertGreater(len(queues_to_delete), 0)

    # Setting up a callback to capture deleted queues
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Run through the code that decides whether to delete a queue
    # that has grown too large.
    # In this case, it should run the check and decide to not delete
    # any queues.
    self.guardian.monitor_queues(pulse_management.queues(),
                                 pulse_management.bindings())

    # Test that none of the queues were deleted: every overgrown queue
    # must still exist on the broker.
    # Fix: the original compared queue *dicts* against a list of queue
    # *names* (`q in queues_to_delete`), which can never match.
    existing = set(q_data['name'] for q_data in pulse_management.queues())
    self.assertTrue(all(name in existing for name in queues_to_delete))

    # And that they were not deleted by guardian...
    # Fix: actually check the captured callback instead of re-testing
    # the (unchanged) length of queues_to_delete.
    self.assertEqual(len(deleted_queues), 0)
def _wait_for_queue_record(self):
    """Wait until one or more queues have been added to the database."""
    consumer = self._create_passive_consumer()
    for attempt in range(self.QUEUE_RECORD_CHECK_ATTEMPTS):
        # No delay before the very first check.
        if attempt:
            time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
        self.guardian.monitor_queues(pulse_management.queues(),
                                     pulse_management.bindings())
        found = Queue.query.filter(
            Queue.name == consumer.queue_name).first()
        if found:
            break
def _wait_for_queue_record(self):
    """Wait until one or more queues have been added to the database."""
    consumer = self._create_passive_consumer()
    for attempt in range(self.QUEUE_RECORD_CHECK_ATTEMPTS):
        # Sleep between retries, but not before the first check.
        if attempt:
            time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
        self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                     pulse_management.bindings(vhost='/'))
        record = Queue.query.filter(
            Queue.name == consumer.queue_name).first()
        if record:
            break
def test_delete(self):
    """Queues that grow past the deletion threshold must be deleted by
    monitor_queues, and reported via the on_delete callback."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object
    db_session.refresh(self.rabbitmq_account)
    self.assertGreater(len(self.rabbitmq_account.queues), 0)

    # Queue multiple messages while no consumer exists.
    for i in range(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in range(100):
        time.sleep(0.3)
        queues_to_delete = [
            q_data['name'] for q_data in pulse_management.queues()
            if q_data['messages_ready'] > self.guardian.del_queue_size
        ]
        if queues_to_delete:
            break

    # Test that there are some queues that should be deleted.
    self.assertGreater(len(queues_to_delete), 0)

    # Setting up a callback to capture deleted queues
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Monitor the queues; this should delete overgrown queues
    self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                 pulse_management.bindings(vhost='/'))

    # Test that the queues that had to be deleted were deleted...
    # Fix: the original compared queue *dicts* against a list of queue
    # *names* (`q in queues_to_delete`), which never matches and made
    # this assertion vacuously true.
    existing = {q_data['name'] for q_data in pulse_management.queues()}
    self.assertTrue(not any(name in existing
                            for name in queues_to_delete))

    # And that they were deleted by guardian...
    self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))

    # And that no queue has overgrown.
    queues_to_delete = [
        q_data['name'] for q_data in pulse_management.queues()
        if q_data['messages_ready'] > self.guardian.del_queue_size
    ]
    self.assertEqual(len(queues_to_delete), 0)
def test_delete(self):
    """Queues that grow past the deletion threshold must be deleted by
    monitor_queues, and reported via the on_delete callback."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object
    db_session.refresh(self.pulse_user)
    self.assertGreater(len(self.pulse_user.queues), 0)

    # Queue multiple messages while no consumer exists.
    for i in xrange(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in xrange(10):
        time.sleep(0.3)
        queues_to_delete = [q_data['name']
                            for q_data in pulse_management.queues()
                            if q_data['messages_ready'] >
                            self.guardian.del_queue_size]
        if queues_to_delete:
            break

    # Test that there are some queues that should be deleted.
    self.assertGreater(len(queues_to_delete), 0)

    # Setting up a callback to capture deleted queues
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Monitor the queues; this should delete overgrown queues
    self.guardian.monitor_queues(pulse_management.queues(),
                                 pulse_management.bindings())

    # Test that the queues that had to be deleted were deleted...
    # Fix: the original compared queue *dicts* against a list of queue
    # *names* (`q in queues_to_delete`), which never matches and made
    # this assertion vacuously true.
    existing = set(q_data['name'] for q_data in pulse_management.queues())
    self.assertTrue(not any(name in existing
                            for name in queues_to_delete))

    # And that they were deleted by guardian...
    self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))

    # And that no queue has overgrown.
    queues_to_delete = [q_data['name']
                        for q_data in pulse_management.queues()
                        if q_data['messages_ready'] >
                        self.guardian.del_queue_size]
    self.assertEqual(len(queues_to_delete), 0)
def test_warning(self):
    """Queues between the warning and deletion thresholds must be
    flagged as warned (and not deleted) by monitor_queues."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.pulse_user)

    # Queue multiple messages while no consumer exists.
    for i in xrange(self.guardian.warn_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait for messages to be taken into account and get the warned
    # messages, if any.
    for i in xrange(10):
        time.sleep(0.3)
        queues_to_warn = set(
            q_data['name'] for q_data in pulse_management.queues()
            if self.guardian.warn_queue_size
            < q_data['messages_ready'] <= self.guardian.del_queue_size)
        if queues_to_warn:
            break

    # Test that no queue has been warned at the beginning of the process.
    self.assertTrue(not any(q.warned for q in self.pulse_user.queues))

    # ... but some queues should be now.
    self.assertGreater(len(queues_to_warn), 0)

    # Monitor the queues; this should detect queues that should be warned.
    self.guardian.monitor_queues(pulse_management.queues(),
                                 pulse_management.bindings())

    # Refresh the user's queues state.
    db_session.refresh(self.pulse_user)

    # Test that the queues that had to be "warned" were.
    # Fix: the original tested `q in queues_to_warn`, comparing a Queue
    # ORM object against a set of name strings — always false, so the
    # all(...) was vacuously true.  Compare by name instead.
    self.assertTrue(all(q.warned for q in self.pulse_user.queues
                        if q.name in queues_to_warn))

    # The queues that needed to be warned haven't been deleted.
    queues_to_warn_bis = set(
        q_data['name'] for q_data in pulse_management.queues()
        if self.guardian.warn_queue_size
        < q_data['messages_ready'] <= self.guardian.del_queue_size)
    self.assertEqual(queues_to_warn_bis, queues_to_warn)
def test_warning(self):
    """Queues between the warning and deletion thresholds must be
    flagged as warned (and not deleted) by monitor_queues."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.rabbitmq_account)

    # Queue multiple messages while no consumer exists.
    for i in range(self.guardian.warn_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait for messages to be taken into account and get the warned
    # messages, if any.
    for i in range(100):
        time.sleep(0.3)
        queues_to_warn = set(
            q_data['name'] for q_data in pulse_management.queues()
            if self.guardian.warn_queue_size
            < q_data['messages_ready'] <= self.guardian.del_queue_size)
        if queues_to_warn:
            break

    # Test that no queue has been warned at the beginning of the process.
    self.assertTrue(
        not any(q.warned for q in self.rabbitmq_account.queues))

    # ... but some queues should be now.
    self.assertGreater(len(queues_to_warn), 0)

    # Monitor the queues; this should detect queues that should be warned.
    self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                 pulse_management.bindings(vhost='/'))

    # Refresh the user's queues state.
    db_session.refresh(self.rabbitmq_account)

    # Test that the queues that had to be "warned" were.
    # Fix: the original tested `q in queues_to_warn`, comparing a Queue
    # ORM object against a set of name strings — always false, so the
    # all(...) was vacuously true.  Compare by name instead.
    self.assertTrue(
        all(q.warned for q in self.rabbitmq_account.queues
            if q.name in queues_to_warn))

    # The queues that needed to be warned haven't been deleted.
    queues_to_warn_bis = set(
        q_data['name'] for q_data in pulse_management.queues()
        if self.guardian.warn_queue_size
        < q_data['messages_ready'] <= self.guardian.del_queue_size)
    self.assertEqual(queues_to_warn_bis, queues_to_warn)
def _wait_for_binding_delete(self, queue_name, exchange_name, routing_key):
    """Wait until a binding has been removed from the database."""
    # Created for its side effects; the returned consumer is not used
    # here — presumably it keeps the test queue alive. TODO confirm.
    consumer = self._create_passive_consumer()
    for attempt in range(self.QUEUE_RECORD_CHECK_ATTEMPTS):
        # Sleep between retries, but not before the first check.
        if attempt:
            time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
        self.guardian.clear_deleted_queues(pulse_management.queues(),
                                           pulse_management.bindings())
        still_there = Binding.query.filter(
            Binding.queue_name == queue_name,
            Binding.exchange == exchange_name,
            Binding.routing_key == routing_key).first()
        if not still_there:
            break
def _wait_for_binding_record(self, queue_name, exchange_name, routing_key):
    """Wait until a binding has been added to the database."""
    # Created for its side effects; the returned consumer is not used
    # here — presumably it keeps the test queue alive. TODO confirm.
    consumer = self._create_passive_consumer()
    for attempt in range(self.QUEUE_RECORD_CHECK_ATTEMPTS):
        # Sleep between retries, but not before the first check.
        if attempt:
            time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
        self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                     pulse_management.bindings(vhost='/'))
        record = Binding.query.filter(
            Binding.queue_name == queue_name,
            Binding.exchange == exchange_name,
            Binding.routing_key == routing_key).first()
        if record:
            break
def _wait_for_binding_delete(self, queue_name, exchange_name, routing_key):
    """Wait until a binding has been removed from the database."""
    # Created for its side effects; the returned consumer is not used
    # here — presumably it keeps the test queue alive. TODO confirm.
    consumer = self._create_passive_consumer()
    attempt = 0
    while attempt < self.QUEUE_RECORD_CHECK_ATTEMPTS:
        attempt += 1
        # Sleep between retries, but not before the first check.
        if attempt > 1:
            time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
        self.guardian.clear_deleted_queues(pulse_management.queues(),
                                           pulse_management.bindings())
        binding = Binding.query.filter(
            Binding.queue_name == queue_name,
            Binding.exchange == exchange_name,
            Binding.routing_key == routing_key).first()
        if binding is None:
            break