class EventDataBatch(object):
    """A container that accumulates events into a single, size-bounded batch.

    Batched sends are more efficient than sending events one at a time.
    EventDataBatch builds the largest batch of `EventData` the service will
    accept, tracking the encoded size as events are appended.

    Call `add` repeatedly; once the next event would push the batch past its
    byte limit, `add` raises a `ValueError`. Hand the finished batch to the
    `send_batch` method of
    :class:`EventHubProducerClient<azure.eventhub.EventHubProducerClient>` or the async
    :class:`EventHubProducerClient<azure.eventhub.aio.EventHubProducerClient>`.

    **Please use the create_batch method of EventHubProducerClient
    to create an EventDataBatch object instead of instantiating an EventDataBatch object directly.**

    **WARNING: Updating the value of the instance variable max_size_in_bytes on an instantiated
    EventDataBatch object is HIGHLY DISCOURAGED. The updated max_size_in_bytes value may conflict
    with the maximum size of events allowed by the Event Hubs service and result in a sending
    failure.**

    :param int max_size_in_bytes: The maximum size of bytes data that an EventDataBatch object can hold.
    :param str partition_id: The specific partition ID to send to.
    :param str partition_key: With the given partition_key, event data will be sent to
     a particular partition of the Event Hub decided by the service.
    """

    def __init__(self, max_size_in_bytes=None, partition_id=None, partition_key=None):
        # type: (Optional[int], Optional[str], Optional[Union[str, bytes]]) -> None
        # Default to the service-level maximum frame size when no cap is supplied.
        self.max_size_in_bytes = max_size_in_bytes or constants.MAX_MESSAGE_LENGTH_BYTES
        self.message = BatchMessage(data=[], multi_messages=False, properties=None)
        self._partition_id = partition_id
        self._partition_key = partition_key
        set_message_partition_key(self.message, self._partition_key)
        # Start the running total at the encoded size of the empty batch envelope,
        # so per-event additions only need to account for their own cost.
        self._size = self.message.gather()[0].get_message_encoded_size()
        self._count = 0

    def __repr__(self):
        # type: () -> str
        details = (
            "max_size_in_bytes={}, partition_id={}, partition_key={!r}, event_count={}".format(
                self.max_size_in_bytes, self._partition_id, self._partition_key, self._count
            )
        )
        return "EventDataBatch({})".format(details)

    def __len__(self):
        # Number of events added so far, not the byte size.
        return self._count

    @classmethod
    def _from_batch(cls, batch_data, partition_key=None):
        # type: (Iterable[EventData], Optional[AnyStr]) -> EventDataBatch
        # Wrap an already-assembled event collection directly as the batch body,
        # bypassing per-event size accounting.
        instance = cls(partition_key=partition_key)
        instance.message._body_gen = batch_data  # pylint:disable=protected-access
        return instance

    def _load_events(self, events):
        # Append each event; surface a size overflow as a collection-level error.
        for event in events:
            try:
                self.add(event)
            except ValueError:
                raise ValueError(
                    "The combined size of EventData collection exceeds the Event Hub frame size limit. "
                    "Please send a smaller collection of EventData, or use EventDataBatch, "
                    "which is guaranteed to be under the frame size limit")

    @property
    def size_in_bytes(self):
        # type: () -> int
        """The combined size of the events in the batch, in bytes.

        :rtype: int
        """
        return self._size

    def add(self, event_data):
        # type: (EventData) -> None
        """Try to add an EventData to the batch.

        The total size of an added event is the sum of its body, properties, etc.
        If this added size results in the batch exceeding the maximum batch size,
        a `ValueError` will be raised.

        :param event_data: The EventData to add to the batch.
        :type event_data: ~azure.eventhub.EventData
        :rtype: None
        :raise: :class:`ValueError`, when exceeding the size limit.
        """
        if self._partition_key:
            # An event carrying its own, different partition key cannot join this batch.
            if event_data.partition_key and event_data.partition_key != self._partition_key:
                raise ValueError(
                    "The partition key of event_data does not match the partition key of this batch."
                )
            if not event_data.partition_key:
                set_message_partition_key(event_data.message, self._partition_key)

        trace_message(event_data)
        encoded_size = event_data.message.get_message_encoded_size()

        # For a BatchMessage, if the encoded_message_size of event_data is < 256, then the
        # overhead cost to encode that message into the BatchMessage would be 5 bytes,
        # if >= 256, it would be 8 bytes.
        overhead = _BATCH_MESSAGE_OVERHEAD_COST[1 if encoded_size >= 256 else 0]
        projected_size = self._size + encoded_size + overhead
        if projected_size > self.max_size_in_bytes:
            raise ValueError(
                "EventDataBatch has reached its size limit: {}".format(self.max_size_in_bytes)
            )

        self.message._body_gen.append(event_data)  # pylint: disable=protected-access
        self._size = projected_size
        self._count += 1
class EventDataBatch(object):
    """A size-capped collection of events intended to be sent in one operation.

    Batching events yields better throughput than sending them individually.
    EventDataBatch lets you fill a batch of `EventData` up to the maximum size
    the service allows. Call `try_add` until a `ValueError` signals the byte
    limit has been reached, then pass the batch to the `send` method of
    ~azure.eventhub.EventHubProducer or ~azure.eventhub.aio.EventHubProducer.

    Please use the `create_batch` method of `EventHubProducer` to create an
    `EventDataBatch` object instead of instantiating an `EventDataBatch`
    object directly.
    """

    def __init__(self, max_size=None, partition_key=None):
        # Default to the service-level maximum message size when no cap is given.
        self.max_size = max_size or constants.MAX_MESSAGE_LENGTH_BYTES
        self._partition_key = partition_key
        self.message = BatchMessage(data=[], multi_messages=False, properties=None)
        self._set_partition_key(partition_key)
        # Seed the running size with the encoded size of the empty batch envelope.
        self._size = self.message.gather()[0].get_message_encoded_size()
        self._count = 0

    def __len__(self):
        # Count of events added, not bytes.
        return self._count

    @property
    def size(self):
        """The current encoded size of the batch, in bytes.

        :return: int
        """
        return self._size

    @staticmethod
    def _from_batch(batch_data, partition_key=None):
        # Adopt an already-assembled event collection as the batch body,
        # skipping per-event size accounting.
        instance = EventDataBatch(partition_key=partition_key)
        instance.message._body_gen = batch_data  # pylint:disable=protected-access
        return instance

    def _set_partition_key(self, value):
        # Record the partition key as an AMQP annotation on the batch envelope
        # and mark the message header durable.
        if not value:
            return
        annotations = self.message.annotations
        if annotations is None:
            annotations = {}
        annotations[types.AMQPSymbol(EventData.PROP_PARTITION_KEY)] = value
        header = MessageHeader()
        header.durable = True
        self.message.annotations = annotations
        self.message.header = header

    def try_add(self, event_data):
        """Attempt to append an event to the batch.

        The message size is the sum of its body, properties, header, etc.

        :param event_data: ~azure.eventhub.EventData
        :return: None
        :raise: ValueError, when exceeding the size limit.
        """
        # A None event is tolerated (logged and skipped) rather than rejected.
        if event_data is None:
            log.warning("event_data is None when calling EventDataBatch.try_add. Ignored")
            return
        if not isinstance(event_data, EventData):
            raise TypeError('event_data should be type of EventData')

        if self._partition_key:
            # An event carrying a different partition key cannot join this batch.
            if event_data.partition_key and event_data.partition_key != self._partition_key:
                raise EventDataError(
                    'The partition_key of event_data does not match the one of the EventDataBatch'
                )
            if not event_data.partition_key:
                event_data._set_partition_key(self._partition_key)  # pylint:disable=protected-access

        event_data._trace_message()  # pylint:disable=protected-access
        encoded_size = event_data.message.get_message_encoded_size()

        # For a BatchMessage, if the encoded_message_size of event_data is < 256, then the
        # overhead cost to encode that message into the BatchMessage would be 5 bytes,
        # if >= 256, it would be 8 bytes.
        overhead = _BATCH_MESSAGE_OVERHEAD_COST[1 if encoded_size >= 256 else 0]
        projected_size = self._size + encoded_size + overhead
        if projected_size > self.max_size:
            raise ValueError(
                "EventDataBatch has reached its size limit {}".format(self.max_size)
            )

        self.message._body_gen.append(event_data)  # pylint: disable=protected-access
        self._size = projected_size
        self._count += 1
class EventDataBatch(object):
    """A container that accumulates events into a single, size-bounded batch.

    Batched sends are more efficient than sending events one at a time.
    EventDataBatch builds the largest batch of `EventData` the service will
    accept, tracking the encoded size as events are appended.

    Call `add` repeatedly; once the next event would push the batch past its
    byte limit, `add` raises a `ValueError`. Hand the finished batch to the
    `send_batch` method of
    :class:`EventHubProducerClient<azure.eventhub.EventHubProducerClient>` or the async
    :class:`EventHubProducerClient<azure.eventhub.aio.EventHubProducerClient>`.
    The `create_batch` method accepts partition_key as a parameter for sending
    to a particular partition.

    **Please use the create_batch method of EventHubProducerClient
    to create an EventDataBatch object instead of instantiating an EventDataBatch object directly.**

    :param int max_size_in_bytes: The maximum size of bytes data that an EventDataBatch object can hold.
    :param str partition_id: The specific partition ID to send to.
    :param str partition_key: With the given partition_key, event data will be sent to
     a particular partition of the Event Hub decided by the service.
    """

    def __init__(self, max_size_in_bytes=None, partition_id=None, partition_key=None):
        # Default to the service-level maximum frame size when no cap is supplied.
        self.max_size_in_bytes = max_size_in_bytes or constants.MAX_MESSAGE_LENGTH_BYTES
        self.message = BatchMessage(data=[], multi_messages=False, properties=None)
        self._partition_id = partition_id
        self._partition_key = partition_key
        set_message_partition_key(self.message, self._partition_key)
        # Seed the running total with the encoded size of the empty batch envelope.
        self._size = self.message.gather()[0].get_message_encoded_size()
        self._count = 0

    def __len__(self):
        # Number of events added so far, not the byte size.
        return self._count

    @staticmethod
    def _from_batch(batch_data, partition_key=None):
        # Adopt an already-assembled event collection as the batch body,
        # bypassing per-event size accounting.
        instance = EventDataBatch(partition_key=partition_key)
        instance.message._body_gen = batch_data  # pylint:disable=protected-access
        return instance

    @property
    def size_in_bytes(self):
        """The combined size of the events in the batch, in bytes.

        :rtype: int
        """
        return self._size

    def add(self, event_data):
        """Try to add an EventData to the batch.

        The total size of an added event is the sum of its body, properties, etc.
        If this added size results in the batch exceeding the maximum batch size,
        a `ValueError` will be raised.

        :param event_data: The EventData to add to the batch.
        :type event_data: ~azure.eventhub.EventData
        :rtype: None
        :raise: :class:`ValueError`, when exceeding the size limit.
        """
        if self._partition_key:
            # An event carrying its own, different partition key cannot join this batch.
            if event_data.partition_key and event_data.partition_key != self._partition_key:
                raise ValueError('The partition key of event_data does not match the partition key of this batch.')
            if not event_data.partition_key:
                set_message_partition_key(event_data.message, self._partition_key)

        trace_message(event_data)
        encoded_size = event_data.message.get_message_encoded_size()

        # For a BatchMessage, if the encoded_message_size of event_data is < 256, then the
        # overhead cost to encode that message into the BatchMessage would be 5 bytes,
        # if >= 256, it would be 8 bytes.
        overhead = _BATCH_MESSAGE_OVERHEAD_COST[1 if encoded_size >= 256 else 0]
        projected_size = self._size + encoded_size + overhead
        if projected_size > self.max_size_in_bytes:
            raise ValueError("EventDataBatch has reached its size limit: {}".format(self.max_size_in_bytes))

        self.message._body_gen.append(event_data)  # pylint: disable=protected-access
        self._size = projected_size
        self._count += 1
class EventDataBatch(object):
    """A holder of a batch of event data, capped at a maximum size in bytes.

    Use the ~azure.eventhub.Producer.create_batch method to create an
    EventDataBatch object. Do not instantiate an EventDataBatch object directly.
    """

    def __init__(self, max_size=None, partition_key=None):
        # type: (Optional[int], Optional[str]) -> None
        # Fall back to the service-defined maximum message size when no cap is given.
        self.max_size = max_size or constants.MAX_MESSAGE_LENGTH_BYTES
        self._partition_key = partition_key
        self.message = BatchMessage(data=[], multi_messages=False, properties=None)
        self._set_partition_key(partition_key)
        # Seed the running size with the encoded size of the empty batch envelope.
        self._size = self.message.gather()[0].get_message_encoded_size()
        self._count = 0

    def __len__(self):
        # Number of events added so far (not the byte size).
        return self._count

    @property
    def size(self):
        """The current encoded size of the batch, in bytes.

        :return: int
        """
        return self._size

    @staticmethod
    def _from_batch(batch_data, partition_key=None):
        # Adopt an already-assembled event collection directly as the batch body,
        # bypassing per-event size accounting.
        batch_data_instance = EventDataBatch(partition_key=partition_key)
        batch_data_instance.message._body_gen = batch_data  # pylint:disable=protected-access
        return batch_data_instance

    def _set_partition_key(self, value):
        # Record the partition key as an AMQP annotation on the batch envelope
        # and mark the message header durable. No-op when value is falsy.
        if value:
            annotations = self.message.annotations
            if annotations is None:
                annotations = dict()
            annotations[types.AMQPSymbol(EventData.PROP_PARTITION_KEY)] = value
            header = MessageHeader()
            header.durable = True
            self.message.annotations = annotations
            self.message.header = header

    def try_add(self, event_data):
        """Attempt to append an event to the batch.

        The message size is a sum up of body, properties, header, etc.

        :param event_data: the ~azure.eventhub.EventData to append.
        :return: None
        :raise: ValueError when the batch would exceed its size limit;
         TypeError when event_data is not an EventData;
         EventDataError on a partition key mismatch.
        """
        # A None event is tolerated (logged and skipped) rather than rejected.
        if event_data is None:
            log.warning("event_data is None when calling EventDataBatch.try_add. Ignored")
            return
        if not isinstance(event_data, EventData):
            raise TypeError('event_data should be type of EventData')

        if self._partition_key:
            # An event carrying a different partition key cannot join this batch.
            if event_data.partition_key and event_data.partition_key != self._partition_key:
                raise EventDataError(
                    'The partition_key of event_data does not match the one of the EventDataBatch'
                )
            if not event_data.partition_key:
                event_data._set_partition_key(self._partition_key)  # pylint:disable=protected-access

        event_data_size = event_data.message.get_message_encoded_size()

        # For a BatchMessage, if the encoded_message_size of event_data is < 256, then the overhead cost to encode that
        # message into the BatchMessage would be 5 bytes, if >= 256, it would be 8 bytes.
        size_after_add = self._size + event_data_size\
            + _BATCH_MESSAGE_OVERHEAD_COST[0 if (event_data_size < 256) else 1]

        if size_after_add > self.max_size:
            raise ValueError(
                "EventDataBatch has reached its size limit {}".format(
                    self.max_size))

        self.message._body_gen.append(event_data)  # pylint: disable=protected-access
        self._size = size_after_add
        self._count += 1