Example #1
class EventProcessor(AbstractEventProcessor):
    """EventProcessor host class for Event Hub."""
    def __init__(self, params):  # pragma: no cover
        """Sample Event Hub event processor implementation."""
        super().__init__()
        self.on_message_received_callback = params[0]
        self._msg_counter = 0
        self.logger = Logger()

    async def open_async(self, context):  # pragma: no cover
        """
        Initialize the event processor.

        Called by the processor host.
        """
        self.logger.info("Connection established %s", context.partition_id)

    async def close_async(self, context, reason):  # pragma: no cover
        """
        Stop the event processor.

        Called by processor host.

        :param context: Information about the partition.
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param reason: Reason for closing the async loop.
        :type reason: str
        """
        self.logger.info(
            "Connection closed (reason %s, id %s, offset %s, sq_number %s)",  # pragma: no cover
            reason,
            context.partition_id,
            context.offset,
            context.sequence_number)

    async def process_events_async(self, context,
                                   messages):  # pragma: no cover
        """
        Do the real work of the event processor.

        Called by the processor host when a batch of events has arrived.

        :param context: Information about the partition.
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param messages: The events to be processed.
        :type messages: list[~azure.eventhub.common.EventData]
        """
        for message in messages:
            message_json = message.body_as_str(encoding='UTF-8')
            if self.on_message_received_callback is not None:
                self.on_message_received_callback(message_json)
                self.logger.debug("Received message: %s", message_json)
        self.logger.info("Events processed %s", context.sequence_number)
        await context.checkpoint_async()

    async def process_error_async(self, context, error):  # pragma: no cover
        """
        Recover from an error.

        Called when the underlying client experiences an error while receiving.
        EventProcessorHost will take care of continuing to pump messages, so
        no external action is required.

        :param context: Information about the partition.
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param error: The error that occurred.
        """
        self.logger.error("Event Processor Error %s", error)
Example #2
class StorageStreamingClient(AbstractStreamingClient):
    """Storage streaming client."""
    def __init__(self, config: dict):
        """
        Configure a storage backend in which to store incoming events.

        Configuration keys:
          STORAGE_PROVIDER -- e.g. AZURE_BLOBS
          STORAGE_KEY -- the blob storage account name
          STORAGE_SECRET -- the blob storage access key
          STORAGE_CONTAINER -- the container that receives the events
          TIMESLICE_FORMAT -- strftime pattern for the folder layout
                              (default '%Y/%m/%d/%H/%M/%S')

        >>> import os
        >>> import shutil
        >>> import tempfile
        >>> storage_folder = tempfile.mkdtemp()
        >>> storage_container = 'some-container'
        >>> storage_config = dict()
        >>> storage_config['STORAGE_PROVIDER'] = 'LOCAL'
        >>> storage_config['STORAGE_KEY'] = storage_folder
        >>> storage_config['STORAGE_SECRET'] = ''
        >>> storage_config['STORAGE_CONTAINER'] = storage_container
        >>> client = StorageStreamingClient(storage_config)
        >>> client.send('some-message')
        >>> len(os.listdir(os.path.join(storage_folder, storage_container)))
        1
        >>> shutil.rmtree(storage_folder)

        """
        provider = getattr(Provider,
                           config.get('STORAGE_PROVIDER', 'AZURE_BLOBS'))
        driver_class = get_driver(provider)
        driver = driver_class(config['STORAGE_KEY'], config['STORAGE_SECRET'])

        self.logger = Logger()

        try:
            self.container: Container = driver.create_container(
                config['STORAGE_CONTAINER'])
            self.logger.info('Created container %s',
                             config['STORAGE_CONTAINER'])
        except ContainerAlreadyExistsError:
            self.container: Container = driver.get_container(
                config['STORAGE_CONTAINER'])
            self.logger.debug('Using existing container %s',
                              config['STORAGE_CONTAINER'])

        self.timeslice_format = config.get('TIMESLICE_FORMAT',
                                           '%Y/%m/%d/%H/%M/%S')

    def start_receiving(self, on_message_received_callback):
        """Log an error; this client only sends and cannot receive messages."""
        self.logger.error('Unexpectedly called %s on %s',
                          self.start_receiving.__name__,
                          self.__class__.__name__)

    def send(self, message):
        """Upload the message to the container under a time-sliced path with a unique name."""
        message_payload = BytesIO(message.encode('utf-8'))
        message_uid = str(uuid4())
        message_time = datetime.utcnow()
        message_folder = message_time.strftime(self.timeslice_format)
        message_path = '{}/{}'.format(message_folder, message_uid)
        self.container.upload_object_via_stream(message_payload, message_path)

    def stop(self):
        """Log an error; this client has no receive loop to stop."""
        self.logger.error('Unexpectedly called %s on %s', self.stop.__name__,
                          self.__class__.__name__)
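
A short usage sketch under the configuration keys documented in the class docstring; the class relies on apache-libcloud storage drivers, and the account name, access key, and container below are placeholders.

# Placeholder credentials; supply a real storage account name and access key.
config = {
    'STORAGE_PROVIDER': 'AZURE_BLOBS',
    'STORAGE_KEY': 'mystorageaccount',
    'STORAGE_SECRET': 'base64-account-key',
    'STORAGE_CONTAINER': 'events',
    'TIMESLICE_FORMAT': '%Y/%m/%d/%H',  # one folder per hour
}

client = StorageStreamingClient(config)
client.send('{"device": "sensor-1", "value": 42}')  # stored as <timeslice>/<uuid> in the container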