Example #1
class HttpMessageSender(MessageSender):
    """HttpMessageSender."""
    def __init__(self, config: dict):  # pragma: no cover
        """
        Message sender implementation that uses HTTP(S) to send messages.

        Configuration keys:

            HOST
            PORT
            SCHEME
            RETRIES
            BACKOFF
        """
        host = config.get('HOST')
        port = config.get('PORT')
        scheme = config.get('SCHEME', 'http')
        retries = config.get('RETRIES', 3)
        backoff = config.get('BACKOFF', 1)

        if not host:
            raise ValueError('Host endpoint must be provided.')

        if not port or int(port) <= 0:
            raise ValueError('Port must be provided and greater than 0.')

        if scheme not in ('http', 'https'):
            raise ValueError('Scheme must be http or https')

        self.server_address = "%s://%s:%s" % (scheme, host, port)
        self.retries = retries
        self.backoff = backoff

        self.logger = Logger()

        self.logger.info("server_address: %s", self.server_address)

    def send(self, message):  # pragma: no cover
        """Send a message via HTTP POST, retrying on failure."""
        return_value = False
        try:
            status_code = post_with_retries(self.server_address,
                                            message,
                                            retries=self.retries,
                                            backoff=self.backoff)

            if status_code != 200:
                self.logger.error(
                    "Error with a request %s and message not sent was %s",
                    status_code, message)
                print("Error with a request %s and message not sent was %s" %
                      (status_code, message))
            else:
                return_value = True

        except Exception as ex:
            self.logger.error('Failed to send request: %s', ex)

        return return_value
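
A minimal usage sketch, assuming HttpMessageSender is importable from the surrounding module; the host, port, and payload below are hypothetical placeholders.

# Hedged usage sketch; every value below is a hypothetical placeholder.
http_config = {
    'HOST': 'output-service',   # hypothetical endpoint host
    'PORT': 8080,
    'SCHEME': 'http',           # optional, defaults to 'http'
    'RETRIES': 3,               # optional, defaults to 3 attempts
    'BACKOFF': 1,               # optional, defaults to 1 second between retries
}
sender = HttpMessageSender(http_config)
if not sender.send('{"payload": "hello"}'):
    print('message was not delivered')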
Example #2
class KafkaStreamingClient(AbstractStreamingClient):
    """Kafka streaming client."""

    def __init__(self, config):  # pragma: no cover
        """
        Streaming client implementation based on Kafka.

        Configuration keys:
          KAFKA_ADDRESS
          KAFKA_CONSUMER_GROUP
          KAFKA_TOPIC
          TIMEOUT
          EVENT_HUB_KAFKA_CONNECTION_STRING
        """
        self.logger = Logger()

        self.topic = config.get("KAFKA_TOPIC")
        if not self.topic:
            raise ValueError("KAFKA_TOPIC is not set in the config object.")

        if not config.get("KAFKA_ADDRESS"):
            raise ValueError("KAFKA_ADDRESS is not set in the config object.")

        if config.get("TIMEOUT"):
            try:
                self.timeout = int(config.get("TIMEOUT"))
            except ValueError:
                self.timeout = None
        else:
            self.timeout = None

        kafka_config = self.create_kafka_config(config)
        self.admin = admin.AdminClient(kafka_config)

        if config.get("KAFKA_CONSUMER_GROUP") is None:
            self.logger.info('Creating Producer')
            self.producer = Producer(kafka_config)
            self.run = False
        else:
            self.logger.info('Creating Consumer')
            self.consumer = Consumer(kafka_config)
            self.run = True
            signal.signal(signal.SIGTERM, self.exit_gracefully)

    @staticmethod
    def create_kafka_config(user_config: dict) -> dict:  # pragma: no cover
        """Create the kafka configuration."""
        config = {
            "bootstrap.servers": user_config.get("KAFKA_ADDRESS"),
            "enable.auto.commit": False,
            "auto.offset.reset": "latest",
            "default.topic.config": {'auto.offset.reset': 'latest'},
        }

        if user_config.get('KAFKA_CONSUMER_GROUP') is not None:
            config['group.id'] = user_config['KAFKA_CONSUMER_GROUP']

        if user_config.get('KAFKA_DEBUG') is not None:
            config['debug'] = user_config['KAFKA_DEBUG']

        if user_config.get('EVENT_HUB_KAFKA_CONNECTION_STRING'):
            ssl_location = user_config.get('SSL_CERT_LOCATION') or '/etc/ssl/certs/ca-certificates.crt'
            kafka_config = {
                'security.protocol': "SASL_SSL",
                'sasl.mechanism': "PLAIN",
                'ssl.ca.location': ssl_location,
                'sasl.username': '******',
                'sasl.password': user_config.get('EVENT_HUB_KAFKA_CONNECTION_STRING'),
                'client.id': 'agogosml',
            }

            config = {**config, **kafka_config}

        return config

    def delivery_report(self, err, msg):  # pragma: no cover
        """
        Indicate delivery result.

        Called once for each message produced. Triggered by poll() or flush().

        :param err: An error message.
        :param msg: A string input to be uploaded to kafka.
        """
        if err is not None:
            self.logger.error('Message delivery failed: %s', err)
        else:
            self.logger.info('Message delivered to %s [%s]',
                             msg.topic(), msg.partition())

    def send(self, message: str):  # pragma: no cover
        """Send a message to the Kafka topic."""
        if not isinstance(message, str):
            raise TypeError('str type expected for message')
        try:
            mutated_message = message.encode('utf-8')
            self.logger.info('Sending message to kafka topic: %s', self.topic)
            self.producer.poll(0)
            self.producer.produce(
                self.topic, mutated_message, callback=self.delivery_report)
            self.producer.flush()
            return True
        except Exception as ex:
            self.logger.error('Error sending message to kafka: %s', ex)
            return False

    def stop(self):
        """Stop streaming client."""
        self.run = False

    def check_timeout(self, start: datetime):  # pragma: no cover
        """Interrupts if too much time has elapsed since the kafka client started running."""
        if self.timeout is not None:
            elapsed = datetime.now() - start
            if elapsed.seconds >= self.timeout:
                raise KeyboardInterrupt

    def handle_kafka_error(self, msg):  # pragma: no cover
        """Handle an error in kafka."""
        if msg.error().code() == KafkaError._PARTITION_EOF:
            # End of partition event
            self.logger.info('%% %s [%d] reached end at offset %d\n',
                             msg.topic(), msg.partition(), msg.offset())
        else:
            # Error
            raise KafkaException(msg.error())

    def start_receiving(self, on_message_received_callback):  # pragma: no cover
        """Poll the Kafka topic and pass each message to the callback."""
        try:
            self.subscribe_to_topic()
            start = datetime.now()

            while self.run:
                # Stop loop after timeout if exists
                self.check_timeout(start)

                # Poll messages from topic
                msg = self.read_single_message()
                if msg is not None:
                    on_message_received_callback(msg)

        except KeyboardInterrupt:
            self.logger.info('Aborting listener...')
            raise

        finally:
            # Close down consumer to commit final offsets.
            self.consumer.close()

    def exit_gracefully(self, signum, frame):  # pylint: disable=unused-argument
        """Handle interrupt signal or calls to stop and exit gracefully."""
        self.logger.info("Handling interrupt signal %s gracefully." % signum)
        self.stop()

    def subscribe_to_topic(self):  # pragma: no cover
        """Subscribe to topic."""
        self.consumer.subscribe([self.topic])

    def read_single_message(self):  # pragma: no cover
        """Poll messages from topic."""
        msg = self.consumer.poll(0.000001)

        if msg is None:
            return None

        if msg.error():
            # Error or event
            self.handle_kafka_error(msg)
            return None

        # Proper message
        # self.logger.info('kafka read message: %s, from topic: %s', msg.value(), msg.topic())
        self.consumer.commit(msg)
        return msg.value()
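
A hedged usage sketch for KafkaStreamingClient: omitting KAFKA_CONSUMER_GROUP puts the client in producer mode, while providing it puts the client in consumer mode. The broker address, topic, and group id below are hypothetical.

# Producer mode: no KAFKA_CONSUMER_GROUP, so a Producer is created.
producer_config = {
    'KAFKA_ADDRESS': 'localhost:9092',  # hypothetical broker
    'KAFKA_TOPIC': 'events',            # hypothetical topic
}
producer_client = KafkaStreamingClient(producer_config)
producer_client.send('{"payload": "hello"}')

# Consumer mode: KAFKA_CONSUMER_GROUP present, so a Consumer is created.
consumer_config = {
    'KAFKA_ADDRESS': 'localhost:9092',
    'KAFKA_TOPIC': 'events',
    'KAFKA_CONSUMER_GROUP': 'example-group',  # hypothetical group id
    'TIMEOUT': '10',                          # stop polling after ~10 seconds
}
consumer_client = KafkaStreamingClient(consumer_config)
consumer_client.start_receiving(lambda msg: print('received:', msg))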
Example #3
class EventHubStreamingClient(AbstractStreamingClient):  # pylint: disable=too-many-instance-attributes
    """Event Hub streaming client."""
    def __init__(self, config):  # pragma: no cover
        """
        Azure EventHub streaming client implementation.

        Configuration keys:
          AZURE_STORAGE_ACCESS_KEY
          AZURE_STORAGE_ACCOUNT
          EVENT_HUB_CONSUMER_GROUP
          EVENT_HUB_NAME
          EVENT_HUB_NAMESPACE
          EVENT_HUB_SAS_KEY
          EVENT_HUB_SAS_POLICY
          LEASE_CONTAINER_NAME
          TIMEOUT

        """
        storage_account_name = config.get("AZURE_STORAGE_ACCOUNT")
        storage_key = config.get("AZURE_STORAGE_ACCESS_KEY")
        lease_container_name = config.get("LEASE_CONTAINER_NAME")
        namespace = config.get("EVENT_HUB_NAMESPACE")
        eventhub = config.get("EVENT_HUB_NAME")
        consumer_group = config.get("EVENT_HUB_CONSUMER_GROUP", '$Default')
        user = config.get("EVENT_HUB_SAS_POLICY")
        key = config.get("EVENT_HUB_SAS_KEY")

        try:
            self.timeout = int(config['TIMEOUT'])
        except (KeyError, ValueError):
            self.timeout = None

        self.logger = Logger()
        self.loop = None

        # Create EPH Client
        if storage_account_name is not None and storage_key is not None:
            self.eph_client = EventHubConfig(sb_name=namespace,
                                             eh_name=eventhub,
                                             policy=user,
                                             sas_key=key,
                                             consumer_group=consumer_group)
            self.eh_options = EPHOptions()
            self.eh_options.release_pump_on_timeout = True
            self.eh_options.auto_reconnect_on_error = False
            self.eh_options.debug_trace = False
            self.storage_manager = AzureStorageCheckpointLeaseManager(
                storage_account_name, storage_key, lease_container_name)

            self.tasks = None
            signal.signal(signal.SIGTERM, self.exit_gracefully)

        # Create Send client
        else:
            address = "amqps://" + namespace + \
                      ".servicebus.windows.net/" + eventhub
            try:
                self.send_client = EventHubClient(address,
                                                  debug=False,
                                                  username=user,
                                                  password=key)
                self.sender = self.send_client.add_sender()
                self.send_client.run()
            except Exception as ex:
                self.logger.error('Failed to init EH send client: %s', ex)
                raise

    def start_receiving(self,
                        on_message_received_callback):  # pragma: no cover
        """Run the Event Processor Host and pass received messages to the callback."""
        self.loop = asyncio.get_event_loop()
        try:
            host = EventProcessorHost(EventProcessor,
                                      self.eph_client,
                                      self.storage_manager,
                                      ep_params=[on_message_received_callback],
                                      eph_options=self.eh_options,
                                      loop=self.loop)

            self.tasks = asyncio.gather(
                host.open_async(), self.wait_and_close(host, self.timeout))
            self.loop.run_until_complete(self.tasks)
        except KeyboardInterrupt:
            self.logger.info(
                "Handling keyboard interrupt or SIGINT gracefully.")
            # Canceling pending tasks and stopping the loop
            for task in asyncio.Task.all_tasks():
                task.cancel()
            self.loop.run_forever()
            self.tasks.exception()
            raise
        finally:
            if self.loop.is_running():
                self.loop.stop()

    def exit_gracefully(self, signum, frame):  # pylint: disable=unused-argument
        """Handle signal interrupt (SIGTERM) gracefully."""
        self.logger.info("Handling signal interrupt %s gracefully." % signum)
        # Canceling pending tasks and stopping the loop
        self.stop()

    def send(self, message):  # pragma: no cover
        """Send a message to the Event Hub."""
        try:
            self.sender.send(EventData(body=message))
            self.logger.info('Sent message: %s', message)
            return True
        except Exception as ex:
            self.logger.error('Failed to send message to EH: %s', ex)
            return False

    def stop(self):  # pragma: no cover
        """Stop the EPH consumer loop or the send client."""
        if self.loop:  # Stop consumer
            for task in asyncio.Task.all_tasks():
                task.cancel()
            self.loop.run_forever()
            if self.tasks:
                self.tasks.exception()
            if self.loop.is_running():
                self.loop.stop()
        else:  # Stop producer
            try:
                self.send_client.stop()
            except Exception as ex:
                self.logger.error('Failed to close send client: %s', ex)

    @staticmethod
    async def wait_and_close(
            host: EventProcessorHost,
            timeout: Optional[float] = None):  # pragma: no cover
        """Run a host indefinitely or until the timeout is reached."""
        if timeout is None:
            while True:
                await asyncio.sleep(1)
        else:
            await asyncio.sleep(timeout)
            await host.close_async()
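
A hedged sketch of how the configuration selects the client's mode: supplying AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY creates the Event Processor Host (receiver), while omitting them creates the AMQP send client. All names and keys below are hypothetical placeholders.

# Receiver mode: storage account keys present, so the EPH consumer is built.
receiver_config = {
    'EVENT_HUB_NAMESPACE': 'my-namespace',        # hypothetical
    'EVENT_HUB_NAME': 'my-hub',                   # hypothetical
    'EVENT_HUB_SAS_POLICY': 'listen-policy',      # hypothetical
    'EVENT_HUB_SAS_KEY': '<sas-key>',             # placeholder
    'AZURE_STORAGE_ACCOUNT': 'mystorageaccount',  # hypothetical
    'AZURE_STORAGE_ACCESS_KEY': '<storage-key>',  # placeholder
    'LEASE_CONTAINER_NAME': 'eph-leases',         # hypothetical
    'TIMEOUT': '60',                              # stop receiving after ~60 seconds
}
receiver = EventHubStreamingClient(receiver_config)
receiver.start_receiving(lambda msg: print('received:', msg))

# Sender mode: no storage keys, so the AMQP send client is built.
sender_config = {
    'EVENT_HUB_NAMESPACE': 'my-namespace',
    'EVENT_HUB_NAME': 'my-hub',
    'EVENT_HUB_SAS_POLICY': 'send-policy',
    'EVENT_HUB_SAS_KEY': '<sas-key>',
}
eh_sender = EventHubStreamingClient(sender_config)
eh_sender.send('{"payload": "hello"}')
eh_sender.stop()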
Example #4
class EventProcessor(AbstractEventProcessor):
    """EventProcessor host class for Event Hub."""
    def __init__(self, params):  # pragma: no cover
        """Sample Event Hub event processor implementation."""
        super().__init__()
        self.on_message_received_callback = params[0]
        self._msg_counter = 0
        self.logger = Logger()

    async def open_async(self, context):  # pragma: no cover
        """
        Initialize the event processor.

        Called by the processor host.
        """
        self.logger.info("Connection established %s", context.partition_id)

    async def close_async(self, context, reason):  # pragma: no cover
        """
        Stop the event processor.

        Called by processor host.

        :param context: Information about the partition.
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param reason: Reason for closing the async loop.
        :type reason: string
        """
        self.logger.info(
            "Connection closed (reason %s, id %s, offset %s, sq_number %s)",  # pragma: no cover
            reason,
            context.partition_id,
            context.offset,
            context.sequence_number)

    async def process_events_async(self, context,
                                   messages):  # pragma: no cover
        """
        Do the real work of the event processor.

        Called by the processor host when a batch of events has arrived.

        :param context: Information about the partition.
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param messages: The events to be processed.
        :type messages: list[~azure.eventhub.common.EventData]
        """
        for message in messages:
            message_json = message.body_as_str(encoding='UTF-8')
            if self.on_message_received_callback is not None:
                self.on_message_received_callback(message_json)
                self.logger.debug("Received message: %s", message_json)
        self.logger.info("Events processed %s", context.sequence_number)
        await context.checkpoint_async()

    async def process_error_async(self, context, error):  # pragma: no cover
        """
        Recover from an error.

        Called when the underlying client experiences an error while receiving.
        EventProcessorHost will take care of continuing to pump messages, so
        no external action is required.

        :param context: Information about the partition.
        :type context: ~azure.eventprocessorhost.PartitionContext
        :param error: The error that occurred.
        """
        self.logger.error("Event Processor Error %s", error)
Example #5
class StorageStreamingClient(AbstractStreamingClient):
    """Storage streaming client."""
    def __init__(self, config: dict):
        """
        Store the events in a storage.

        Configuration keys:
          STORAGE_PROVIDER -- e.g. AZURE_BLOBS
          STORAGE_KEY -- the blob storage account name
          STORAGE_SECRET -- the blob storage access key
          STORAGE_CONTAINER
          TIMESLICE_FORMAT

        >>> import os
        >>> import shutil
        >>> import tempfile
        >>> storage_folder = tempfile.mkdtemp()
        >>> storage_container = 'some-container'
        >>> storage_config = dict()
        >>> storage_config['STORAGE_PROVIDER'] = 'LOCAL'
        >>> storage_config['STORAGE_KEY'] = storage_folder
        >>> storage_config['STORAGE_SECRET'] = ''
        >>> storage_config['STORAGE_CONTAINER'] = storage_container
        >>> client = StorageStreamingClient(storage_config)
        >>> client.send('some-message')
        >>> len(os.listdir(os.path.join(storage_folder, storage_container)))
        1
        >>> shutil.rmtree(storage_folder)

        """
        provider = getattr(Provider,
                           config.get('STORAGE_PROVIDER', 'AZURE_BLOBS'))
        driver_class = get_driver(provider)
        driver = driver_class(config['STORAGE_KEY'], config['STORAGE_SECRET'])

        self.logger = Logger()

        try:
            self.container: Container = driver.create_container(
                config['STORAGE_CONTAINER'])
            self.logger.info('Created container %s',
                             config['STORAGE_CONTAINER'])
        except ContainerAlreadyExistsError:
            self.container: Container = driver.get_container(
                config['STORAGE_CONTAINER'])
            self.logger.debug('Using existing container %s',
                              config['STORAGE_CONTAINER'])

        self.timeslice_format = config.get('TIMESLICE_FORMAT',
                                           '%Y/%m/%d/%H/%M/%S')

    def start_receiving(self, on_message_received_callback):
        """Log an error: receiving is not supported by the storage client."""
        self.logger.error('Unexpectedly called %s on %s',
                          self.start_receiving.__name__,
                          self.__class__.__name__)

    def send(self, message):
        """Write the message to a time-sliced path in the storage container."""
        message_payload = BytesIO(message.encode('utf-8'))
        message_uid = str(uuid4())
        message_time = datetime.utcnow()
        message_folder = message_time.strftime(self.timeslice_format)
        message_path = '{}/{}'.format(message_folder, message_uid)
        self.container.upload_object_via_stream(message_payload, message_path)

    def stop(self):
        """Log an error: the storage client has nothing to stop."""
        self.logger.error('Unexpectedly called %s on %s', self.stop.__name__,
                          self.__class__.__name__)
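
A short sketch of the blob layout send() produces: messages land under a time-sliced folder derived from TIMESLICE_FORMAT, with a random UUID as the object name (the printed path is illustrative).

from datetime import datetime
from uuid import uuid4

timeslice_format = '%Y/%m/%d/%H/%M/%S'  # the default TIMESLICE_FORMAT
message_folder = datetime.utcnow().strftime(timeslice_format)
message_path = '{}/{}'.format(message_folder, str(uuid4()))
print(message_path)  # e.g. 2019/05/07/13/45/02/<uuid>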
Example #6
class EventHubStreamingClient(AbstractStreamingClient):
    """Event Hub streaming client."""

    def __init__(self, config):  # pragma: no cover
        """
        Azure EventHub streaming client implementation.

        Configuration keys:
          AZURE_STORAGE_ACCESS_KEY
          AZURE_STORAGE_ACCOUNT
          EVENT_HUB_CONSUMER_GROUP
          EVENT_HUB_NAME
          EVENT_HUB_NAMESPACE
          EVENT_HUB_SAS_KEY
          EVENT_HUB_SAS_POLICY
          LEASE_CONTAINER_NAME
          TIMEOUT

        """
        storage_account_name = config.get("AZURE_STORAGE_ACCOUNT")
        storage_key = config.get("AZURE_STORAGE_ACCESS_KEY")
        lease_container_name = config.get("LEASE_CONTAINER_NAME")
        namespace = config.get("EVENT_HUB_NAMESPACE")
        eventhub = config.get("EVENT_HUB_NAME")
        consumer_group = config.get("EVENT_HUB_CONSUMER_GROUP", '$default')
        user = config.get("EVENT_HUB_SAS_POLICY")
        key = config.get("EVENT_HUB_SAS_KEY")

        try:
            self.timeout = int(config['TIMEOUT'])
        except (KeyError, ValueError):
            self.timeout = None

        self.logger = Logger()

        # Create EPH Client
        if storage_account_name is not None and storage_key is not None:
            self.eph_client = EventHubConfig(
                sb_name=namespace,
                eh_name=eventhub,
                policy=user,
                sas_key=key,
                consumer_group=consumer_group)
            self.eh_options = EPHOptions()
            self.eh_options.release_pump_on_timeout = True
            self.eh_options.auto_reconnect_on_error = False
            self.eh_options.debug_trace = False
            self.storage_manager = AzureStorageCheckpointLeaseManager(
                storage_account_name, storage_key,
                lease_container_name)

        # Create Send client
        else:
            address = "amqps://" + namespace + \
                      ".servicebus.windows.net/" + eventhub
            try:
                self.send_client = EventHubClient(
                    address,
                    debug=False,
                    username=user,
                    password=key)
                self.sender = self.send_client.add_sender()
                self.send_client.run()
            except Exception as ex:
                self.logger.error('Failed to init EH send client: %s', ex)
                raise

    def start_receiving(self, on_message_received_callback):  # pragma: no cover
        """Run the Event Processor Host and pass received messages to the callback."""
        loop = asyncio.get_event_loop()
        try:
            host = EventProcessorHost(
                EventProcessor,
                self.eph_client,
                self.storage_manager,
                ep_params=[on_message_received_callback],
                eph_options=self.eh_options,
                loop=loop)

            tasks = asyncio.gather(host.open_async(),
                                   self.wait_and_close(host, self.timeout))
            loop.run_until_complete(tasks)

        finally:
            loop.stop()

    def send(self, message):  # pragma: no cover
        """Send a message to the Event Hub."""
        try:
            self.sender.send(EventData(body=message))
            self.logger.info('Sent message: %s', message)
            return True
        except Exception as ex:
            self.logger.error('Failed to send message to EH: %s', ex)
            return False

    def stop(self):  # pragma: no cover
        """Stop the Event Hub send client."""
        try:
            self.send_client.stop()
        except Exception as ex:
            self.logger.error('Failed to close send client: %s', ex)

    @staticmethod
    async def wait_and_close(host: EventProcessorHost, timeout: Optional[float] = None):  # pragma: no cover
        """Run a host indefinitely or until the timeout is reached."""
        if timeout is None:
            while True:
                await asyncio.sleep(1)

        else:
            await asyncio.sleep(timeout)
            await host.close_async()