def run(self):
    """See :meth:`ingest.strike.monitors.monitor.Monitor.run`

    Polls the configured SQS queue endlessly (until ``self._running`` is
    cleared), handing each S3 event notification to the ingest callback.
    Messages are deleted from the queue once successfully processed;
    unrecognized messages are deleted only when ``sqs_discard_unrecognized``
    is enabled, and zero-byte-file notifications are always discarded.
    """

    logger.info('Running experimental S3 Strike processor')

    # Loop endlessly polling SQS queue
    while self._running:
        # Between each pass over the SQS, refresh configuration from database in case of credential changes.
        # This eliminates the need to stop and restart a Strike job to pick up configuration updates.
        self.reload_configuration()

        with SQSClient(self._credentials, self._region_name) as client:
            # For each new file we receive a notification about:
            # Lazy %-style args: the message is only formatted when DEBUG is
            # enabled, instead of on every pass of this hot poll loop.
            logger.debug('Beginning long-poll against queue with wait time of %s seconds.', self.wait_time)
            messages = client.receive_messages(self._sqs_name, batch_size=10,
                                               wait_time_seconds=self.wait_time,
                                               visibility_timeout_seconds=self.visibility_timeout)

            for message in messages:
                try:
                    # Perform message extraction and then callback to ingest
                    self._process_s3_notification(message)

                    # Remove message from queue now that the message is processed
                    message.delete()
                except SQSNotificationError:
                    logger.exception('Unable to process message. Invalid SQS S3 notification.')

                    if self.sqs_discard_unrecognized:
                        # Remove message from queue when unrecognized
                        logger.warning('Removing message that cannot be processed.')
                        message.delete()
                except S3NoDataNotificationError:
                    # Nothing to ingest for an empty object; discard the notification.
                    logger.exception('Unable to process message. File size of 0')
                    message.delete()
def get_queue_size(self):
    """See :meth:`messaging.backends.backend.MessagingBackend.get_queue_size`"""

    # Open a short-lived SQS client just for this query and report the
    # current depth of the configured queue.
    with SQSClient(self._credentials, self._region_name) as sqs_client:
        queue_size = sqs_client.get_queue_size(queue_name=self._queue_name)
    return queue_size