def test_send_with_partition_key(connstr_receivers):
    connection_str, receivers = connstr_receivers
    client = EventHubProducerClient.from_connection_string(connection_str)
    with client:
        data_val = 0
        for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]:
            partition_key = b"test_partition_" + partition
            for i in range(50):
                batch = client.create_batch(partition_key=partition_key)
                batch.add(EventData(str(data_val)))
                data_val += 1
                client.send_batch(batch)

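    # Verify routing: all events sent with a given partition key should have
    # been received from the same partition (i.e. the same receiver index).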
    found_partition_keys = {}
    for index, partition in enumerate(receivers):
        received = partition.receive_message_batch(timeout=5000)
        for message in received:
            try:
                event_data = EventData._from_message(message)
                existing = found_partition_keys[event_data.partition_key]
                assert existing == index
            except KeyError:
                found_partition_keys[event_data.partition_key] = index
def test_send_connection_idle_timeout_and_reconnect_sync(connstr_receivers):
    connection_str, receivers = connstr_receivers
    client = EventHubProducerClient.from_connection_string(
        conn_str=connection_str, idle_timeout=10)
    with client:
        ed = EventData('data')
        sender = client._create_producer(partition_id='0')
        with sender:
            sender._open_with_retry()
            time.sleep(11)
            sender._unsent_events = [ed.message]
            ed.message.on_send_complete = sender._on_outcome
            with pytest.raises(
                (uamqp.errors.ConnectionClose, uamqp.errors.MessageHandlerError,
                 OperationTimeoutError)):
                # Mac may raise OperationTimeoutError or MessageHandlerError
                sender._send_event_data()
            sender._send_event_data_with_retry()

    messages = receivers[0].receive_message_batch(max_batch_size=10,
                                                  timeout=10000)
    received_ed1 = EventData._from_message(messages[0])
    assert received_ed1.body_as_str() == 'data'
def test_send_with_create_event_batch_with_app_prop_sync(connstr_receivers):
    connection_str, receivers = connstr_receivers
    app_prop_key = "raw_prop"
    app_prop_value = "raw_value"
    app_prop = {app_prop_key: app_prop_value}
    client = EventHubProducerClient.from_connection_string(
        connection_str, transport_type=TransportType.AmqpOverWebsocket)
    with client:
        event_data_batch = client.create_batch(max_size_in_bytes=100000)
        while True:
            try:
                ed = EventData('A single event data')
                ed.properties = app_prop
                event_data_batch.add(ed)
            except ValueError:
                break
        client.send_batch(event_data_batch)
        received = []
        for r in receivers:
            received.extend(r.receive_message_batch(timeout=5000))
        assert len(received) >= 1
        assert EventData._from_message(
            received[0]).properties[b"raw_prop"] == b"raw_value"
Example #4
async def test_client_secret_credential_async(aad_credential, live_eventhub):
    try:
        from azure.identity.aio import EnvironmentCredential
    except ImportError:
        pytest.skip("No azure identity library")

    credential = EnvironmentCredential()
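    # Note: EnvironmentCredential reads service principal settings from
    # environment variables (e.g. AZURE_TENANT_ID, AZURE_CLIENT_ID,
    # AZURE_CLIENT_SECRET); this test assumes they are already configured.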
    producer_client = EventHubProducerClient(
        fully_qualified_namespace=live_eventhub['hostname'],
        eventhub_name=live_eventhub['event_hub'],
        credential=credential,
        user_agent='customized information')
    consumer_client = EventHubConsumerClient(
        fully_qualified_namespace=live_eventhub['hostname'],
        eventhub_name=live_eventhub['event_hub'],
        consumer_group='$default',
        credential=credential,
        user_agent='customized information')

    async with producer_client:
        await producer_client.send(EventData(body='A single message'),
                                   partition_id='0')

    def on_event(partition_context, event):
        on_event.called = True
        on_event.partition_id = partition_context.partition_id
        on_event.event = event

    on_event.called = False
    async with consumer_client:
        task = asyncio.ensure_future(
            consumer_client.receive(on_event, partition_id='0'))
        await asyncio.sleep(6)
    await task
    assert on_event.called is True
    assert on_event.partition_id == "0"
    assert list(on_event.event.body)[0] == 'A single message'.encode('utf-8')
Example #5
async def run():
    # Create a producer client to send messages to the event hub.
    # Specify a connection string to your event hubs namespace and
    # the event hub name.
    #producer = EventHubProducerClient.from_connection_string(conn_str="EVENT HUBS NAMESPACE - CONNECTION STRING", eventhub_name="EVENT HUB NAME")
    producer = EventHubProducerClient.from_connection_string(
        conn_str=
        "Endpoint=sb://i3eventhub.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=VY9VS8ascXPcbEM6zhk7lXifEwY5u0WRot9nprAwXTI=",
        eventhub_name="iothubevent")
    '''
    json_body = { "measurement":"temperature",
                   "tags":{
                     "tag1":"val1"
                   },
                  "fields":{
                        "value1": 100.0
                  }
                }
    '''
    json_body = {
        "origin": "raspberry4",
        "payload": {
            "temperature": 21.644272300990785,
            "humidity": 78.56935158645383
        }
    }
    async with producer:
        # Create a batch.
        event_data_batch = await producer.create_batch()

        # Add events to the batch. EventData expects a str or bytes body, so
        # the dict is serialized to JSON first (assumes `json` is imported).
        event_data_batch.add(EventData(json.dumps(json_body)))
        #event_data_batch.add(EventData('Second event'))
        #event_data_batch.add(EventData('Third event'))

        # Send the batch of events to the event hub.
        await producer.send_batch(event_data_batch)
Example #6
async def run(producer):

    async with producer:

        # Without specifying partition_id or partition_key
        # The events will be distributed to available partitions via round-robin.
        event_data_batch = await producer.create_batch(max_size_in_bytes=10000)

        # Specifying partition_id
        # event_data_batch = producer.create_batch(partition_id='0')

        # Specifying partition_key
        # event_data_batch = producer.create_batch(partition_key='pkey')

        while True:
            try:
                event_data_batch.add(
                    EventData('Message inside EventBatchData'))
            except ValueError:
                # The EventDataBatch object has reached max_size.
                # A new EventDataBatch object can be created here to send more
                # data; see the sketch after this example.
                break

        await producer.send_batch(event_data_batch)
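
# The comment above notes that a fresh EventDataBatch can be created once the
# current one is full. A minimal sketch of that pattern (hypothetical helper,
# not part of the original sample, assuming the same async producer client):
async def send_all(producer, payloads):
    batch = await producer.create_batch()
    for payload in payloads:
        try:
            batch.add(EventData(payload))
        except ValueError:
            # The batch is full: send it, start a new one, and re-add the
            # event that did not fit.
            await producer.send_batch(batch)
            batch = await producer.create_batch()
            batch.add(EventData(payload))
    if len(batch) > 0:
        await producer.send_batch(batch)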
Example #7
async def test_partition_processor_process_events_error(connstr_senders):
    class ErrorPartitionProcessor(PartitionProcessor):
        async def process_events(self, events, partition_context):
            if partition_context.partition_id == "1":
                raise RuntimeError("processing events error")
            else:
                pass

        async def process_error(self, error, partition_context):
            if partition_context.partition_id == "1":
                assert isinstance(error, RuntimeError)
            else:
                raise RuntimeError(
                    "There shouldn't be an error for partition other than 1")

        async def close(self, reason, partition_context):
            if partition_context.partition_id == "1":
                assert reason == CloseReason.PROCESS_EVENTS_ERROR
            else:
                assert reason == CloseReason.SHUTDOWN

    connection_str, senders = connstr_senders
    for sender in senders:
        sender.send(EventData("EventProcessor Test"))
    eventhub_client = EventHubClient.from_connection_string(connection_str,
                                                            receive_timeout=3)
    partition_manager = SamplePartitionManager()

    event_processor = EventProcessor(eventhub_client,
                                     "$default",
                                     ErrorPartitionProcessor,
                                     partition_manager,
                                     polling_interval=1)
    asyncio.ensure_future(event_processor.start())
    await asyncio.sleep(10)
    await event_processor.stop()
Example #8
async def test_send_over_websocket_async(connstr_receivers):
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(
        connection_str,
        transport_type=TransportType.AmqpOverWebsocket,
        network_tracing=False)
    sender = client.create_producer()

    event_list = []
    for i in range(20):
        event_list.append(EventData("Event Number {}".format(i)))

    async with sender:
        await sender.send(event_list)

    time.sleep(1)
    received = []
    for r in receivers:
        received.extend(r.receive(timeout=3))

    assert len(received) == 20

    for r in receivers:
        r.close()
Example #9
def test_send_batch_sync(connection_str, receivers):
    def batched():
        for i in range(10):
            yield "Event number {}".format(i)

    client = EventHubClient.from_connection_string(connection_str, debug=False)
    sender = client.add_sender()
    try:
        client.run()
        sender.send(EventData(batch=batched()))
    except:
        raise
    finally:
        client.stop()

    time.sleep(1)
    received = []
    for r in receivers:
        received.extend(r.receive(timeout=3))

    assert len(received) == 10
    for index, message in enumerate(received):
        assert list(
            message.body)[0] == "Event number {}".format(index).encode('utf-8')
Example #10
def main(req: func.HttpRequest):
    # Get event count from http request query parameter
    count = int(req.params.get('count', '1'))

    # Parse event metadata from http request
    json_string = req.get_body().decode('utf-8')
    event_dict = json.loads(json_string)

    # Create an EventHub Client and event batch
    client = EventHubProducerClient.from_connection_string(
        os.getenv('AzureWebJobsEventHubConnectionString'),
        eventhub_name='python-worker-ci-eventhub-batch-metadata')

    # Generate new event based on http request with full metadata
    event_data_batch = client.create_batch()
    random_number = int(event_dict.get('body', '0'))
    for i in range(count):
        event_data_batch.add(EventData(str(random_number + i)))

    # Send out event into event hub
    with client:
        client.send_batch(event_data_batch)

    return 'OK'
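
# For illustration (the route and host below are assumptions, not from the
# original sample), a request such as
#   POST http://localhost:7071/api/<function_name>?count=3   with body {"body": "100"}
# would result in the events "100", "101" and "102" being sent to the hub.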
Example #11
def test_send_with_partition_key(connstr_receivers):
    connection_str, receivers = connstr_receivers
    client = EventHubClient.from_connection_string(connection_str,
                                                   network_tracing=False)
    sender = client.create_producer()
    with sender:
        data_val = 0
        for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]:
            partition_key = b"test_partition_" + partition
            for i in range(50):
                data = EventData(str(data_val))
                #data.partition_key = partition_key
                data_val += 1
                sender.send(data, partition_key=partition_key)

    found_partition_keys = {}
    for index, partition in enumerate(receivers):
        received = partition.receive(timeout=5)
        for message in received:
            try:
                existing = found_partition_keys[message.partition_key]
                assert existing == index
            except KeyError:
                found_partition_keys[message.partition_key] = index
Example #12
async def test_receive_batch_early_callback_async(connstr_senders):
    '''Test that the callback is called as soon as max_batch_size is reached,
    before max_wait_time elapses.
    '''
    connection_str, senders = connstr_senders
    for _ in range(10):
        senders[0].send(EventData("Test EventData"))
    client = EventHubConsumerClient.from_connection_string(
        connection_str, consumer_group='$default')

    async def on_event_batch(partition_context, event_batch):
        on_event_batch.received += len(event_batch)

    on_event_batch.received = 0

    async with client:
        task = asyncio.ensure_future(
            client.receive_batch(on_event_batch,
                                 max_batch_size=10,
                                 max_wait_time=100,
                                 starting_position="-1",
                                 partition_id="0"))
        await asyncio.sleep(10)
        assert on_event_batch.received == 10
    await task
Example #13
def test_example_eventhub_transfer(connection_str):
    import os
    from azure.eventhub import EventHubClient, EventData

    client = EventHubClient.from_connection_string(connection_str)
    sender = client.add_sender()

    try:
        client.run()
        # [START eventhub_client_transfer]
        logger = logging.getLogger("azure.eventhub")

        def callback(outcome, condition):
            logger.info("Message sent. Outcome: {}, Condition: {}".format(
                outcome, condition))

        event_data = EventData(b"A single event")
        sender.transfer(event_data, callback=callback)
        sender.wait()
        # [END eventhub_client_transfer]
    except:
        raise
    finally:
        client.stop()
Example #14
    def write_to_event_hub(self, filename, data):
        write_allowed = self.redis_notifier.notify_start_record(filename)

        if write_allowed:

            with event_hub_client:
                for idx, line in enumerate(data, start=1):

                    # Create a batch.
                    event_data_batch = event_hub_client.create_batch()

                    event_data_batch.add(EventData(line))

                    event_hub_client.send_batch(event_data_batch)

                    if idx % self.redis_write_step == 0:  # write to redis every n lines
                        self.redis_notifier.notify_read_lines(
                            filename, f"{idx-100}..{idx}")

            completed = self.redis_notifier.notify_completed(filename)

        else:
            logging.info(
                f"=====Attempted duplicate write for file: {filename} =====")
async def test_receive_over_websocket_async(connstr_senders):
    connection_str, senders = connstr_senders
    client = EventHubClient.from_connection_string(
        connection_str, transport_type=TransportType.AmqpOverWebsocket)
    receiver = client._create_consumer(consumer_group="$default",
                                       partition_id="0",
                                       event_position=EventPosition('@latest'),
                                       prefetch=500)

    event_list = []
    for i in range(20):
        event_list.append(EventData("Event Number {}".format(i)))

    async with receiver:
        received = await receiver.receive(timeout=5)
        assert len(received) == 0

        senders[0].send(event_list)

        time.sleep(1)

        received = await receiver.receive(max_batch_size=50, timeout=5)
        assert len(received) == 20
    await client.close()
async def test_non_exclusive_receiver_after_exclusive_receiver_async(
        connstr_senders):
    connection_str, senders = connstr_senders
    senders[0].send(EventData(b"Receiving only a single event"))

    client = EventHubClient.from_connection_string(connection_str)
    receiver1 = client._create_consumer(consumer_group="$default",
                                        partition_id="0",
                                        event_position=EventPosition("-1"),
                                        owner_level=15,
                                        prefetch=10)
    receiver2 = client._create_consumer(consumer_group="$default",
                                        partition_id="0",
                                        event_position=EventPosition("-1"),
                                        prefetch=10)
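    # receiver1 is an exclusive ("epoch") consumer with owner_level=15; a
    # non-exclusive consumer on the same partition and consumer group is
    # expected to be rejected, hence the ConnectionLostError below.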
    try:
        output1 = await pump(receiver1)
        with pytest.raises(ConnectionLostError):
            await pump(receiver2)
        assert output1 == 1
    finally:
        await receiver1.close()
        await receiver2.close()
    await client.close()
Example #17
    async def flush(self, event):
        """
        Actually publishes the event,
        (called after request is done in the 'action' decorator).
        """
        task_id = self._get_loop_task_id()

        if not self.producer:
            raise Exception(("Azure event publisher couldn't publish because "
                             "producer has not been created."))

        # Serialize
        try:
            json_data = json.dumps(event.serialize()).encode()
        except TypeError:
            self.log_service.error(
                "Failed to flush an event! The event couldn't be serialized, "
                "(check log details for event data).",
                extra={
                    'event_data': event.serialize(),
                })
            raise

        # Without specifying partition_id or partition_key
        # the events will be distributed to available partitions via
        # round-robin.
        event_data_batch = await self.producer.create_batch()

        event_data_batch.add(EventData(json_data))
        await self.producer.send_batch(event_data_batch)

        if self.keep_flushed_copies:
            if task_id not in self.flushed:
                self.flushed[task_id] = []

            self.flushed[task_id].append(event)
Example #18
    async def receive(self):
        await asyncio.sleep(0.1)
        await self._on_event_received(EventData("mock events"))
Example #19
    def execute(self):
        self.client.run()
        self.logger.info("in progress - started")
        connection = psycopg2.connect(self.connection_string)
        cursor = connection.cursor()
        slot_name = "siirto_slot"

        # create the slot if it doesn't already exist
        cursor.execute(f"SELECT 1 FROM pg_replication_slots WHERE slot_name = '{slot_name}';")
        rows = cursor.fetchall()
        cursor_exists = False
        for row in rows:
            cursor_exists = True
        if not cursor_exists:
            cursor.execute(f"SELECT 'init' FROM "
                           f"pg_create_logical_replication_slot('{slot_name}', 'wal2json');")

        current_table_cdc_file_name = {}
        for table_name in self.table_names:
            table_name_in_folder = table_name.replace(".", "_")
            cdc_folder_for_table = os.path.join(self.output_folder_location,
                                                table_name_in_folder)
            file_indexes = []
            if os.path.exists(cdc_folder_for_table):
                file_indexes = [int(file_name.replace(f"{table_name}_cdc_", "").replace(".csv", ""))
                                for file_name in list(os.listdir(cdc_folder_for_table))
                                if re.search(f"^{table_name}_cdc_.*.csv$", file_name)]

            file_index = 1
            if len(file_indexes) > 0:
                file_index = max(file_indexes) + 1

            if not os.path.exists(cdc_folder_for_table):
                os.mkdir(cdc_folder_for_table)

            file_to_write = os.path.join(self.output_folder_location,
                                         table_name_in_folder,
                                         f"{table_name}_cdc_{file_index}.csv")
            current_table_cdc_file_name[table_name] = {
                'file_to_write': file_to_write,
                'index': file_index
            }

        tables_string = ",".join(self.table_names)
        while self.is_running:
            self.logger.info("running cdc pull iteration")
            cursor.execute(f"SELECT lsn, data FROM  pg_logical_slot_peek_changes('{slot_name}', "
                           f"NULL, NULL, 'pretty-print', '1', "
                           f"'add-tables', '{tables_string}');")
            rows = cursor.fetchall()
            rows_collected = {}
            max_lsn = None
            # read the WALs
            for row in rows:
                max_lsn = row[0]
                change_set = json.loads(row[1])
                change_set_entries = change_set["change"] if 'change' in change_set else []
                for change_set_entry in change_set_entries:
                    table_name = f"{change_set_entry['schema']}.{change_set_entry['table']}" \
                        if 'table' in change_set_entry else None
                    if table_name:
                        self.sender.send(EventData(json.dumps(change_set_entry)))

            # consume the WALs up to max_lsn (pg_logical_slot_get_changes
            # advances the slot, unlike the peek above)
            if max_lsn:
                cursor.execute(f"SELECT 1 FROM  pg_logical_slot_get_changes('{slot_name}', "
                               f"'{max_lsn}', NULL, 'pretty-print', '1', "
                               f"'add-tables', '{tables_string}');")
            # wait for the configured poll frequency before the next iteration
            time.sleep(self.poll_frequency)

        print(f'cleaning the {slot_name}')
        cursor.execute(f"SELECT 'stop' FROM pg_drop_replication_slot('{slot_name}');")
        print(f'cleared the {slot_name}')
        self.logger.info("stopped")
Example #20
def send_event_data_batch_with_properties(producer):
    event_data_batch = producer.create_batch()
    event_data = EventData('Message with properties')
    event_data.properties = {'prop_key': 'prop_value'}
    event_data_batch.add(event_data)
    producer.send_batch(event_data_batch)
Example #21
        return AccessToken(self.token, self.expiry)


# Target namespace and hub must also be specified.  Consumer group is set to default unless required otherwise.
FULLY_QUALIFIED_NAMESPACE = os.environ['EVENT_HUB_HOSTNAME']
EVENTHUB_NAME = os.environ['EVENT_HUB_NAME']

# The following part creates a SAS token. Users can use any way to create a SAS token.
SAS_POLICY = os.environ['EVENT_HUB_SAS_POLICY']
SAS_KEY = os.environ['EVENT_HUB_SAS_KEY']
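
# generate_sas_token is used below but not defined in this snippet. As an
# assumption (not the original helper), a minimal implementation could be:
import base64
import hashlib
import hmac
import urllib.parse


def generate_sas_token(uri, policy, key, ttl):
    # Sign "<url-encoded uri>\n<expiry>" with the SAS key (HMAC-SHA256) and
    # assemble the standard SharedAccessSignature string.
    expiry = int(time.time() + ttl)
    encoded_uri = urllib.parse.quote_plus(uri)
    to_sign = "{}\n{}".format(encoded_uri, expiry).encode("utf-8")
    signature = base64.b64encode(
        hmac.new(key.encode("utf-8"), to_sign, hashlib.sha256).digest())
    return "SharedAccessSignature sr={}&sig={}&se={}&skn={}".format(
        encoded_uri, urllib.parse.quote_plus(signature), expiry, policy)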

uri = "sb://{}/{}".format(FULLY_QUALIFIED_NAMESPACE, EVENTHUB_NAME)
token_ttl = 3000  # seconds
sas_token = generate_sas_token(uri, SAS_POLICY, SAS_KEY, token_ttl)
# end of creating a SAS token

producer_client = EventHubProducerClient(
    fully_qualified_namespace=FULLY_QUALIFIED_NAMESPACE,
    eventhub_name=EVENTHUB_NAME,
    credential=CustomizedSASCredential(sas_token,
                                       time.time() + token_ttl),
    logging_enable=True)

start_time = time.time()
with producer_client:
    event_data_batch = producer_client.create_batch()
    event_data_batch.add(EventData('Single message'))
    producer_client.send_batch(event_data_batch)

print("Send messages in {} seconds.".format(time.time() - start_time))
Example #22
    # Create Event Hubs client
    client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY)
    sender = client.add_sender(partition="0")
    client.run()
    try:
        start_time = time.time()
        for i in range(100):
            print("Sending message: {}".format(i))
            writer = DatumWriter(SCHEMA)
            bytes_writer = io.BytesIO()
            encoder = avro.io.BinaryEncoder(bytes_writer)
            # Write sample records to an Avro container file on disk.
            file_writer = DataFileWriter(open("users.avro", "wb"), DatumWriter(), SCHEMA)
            file_writer.append({"name": "Alyssa", "favorite_number": 256})
            file_writer.append({"name": "Ben", "favorite_number": 7, "favorite_color": "red"})
            file_writer.close()
            # Binary-encode a record in memory; the raw bytes are sent below.
            writer.write({"name": "123", "favorite_number": 10}, encoder)
            raw_bytes = bytes_writer.getvalue()
            sender.send(EventData(raw_bytes))
    except:
        raise
    finally:
        end_time = time.time()
        client.stop()
        run_time = end_time - start_time
        logger.info("Runtime: {} seconds".format(run_time))

except KeyboardInterrupt:
    pass

Example #23
def send_event_data_batch(producer):
    # Without specifying partition_id or partition_key
    # the events will be distributed to available partitions via round-robin.
    event_data_batch = producer.create_batch()
    event_data_batch.add(EventData('Single message'))
    producer.send_batch(event_data_batch)
def test_event_data_batch():
    batch = EventDataBatch(max_size_in_bytes=100, partition_key="par")
    batch.add(EventData("A"))
    assert batch.size_in_bytes == 89 and len(batch) == 1
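    # size_in_bytes reflects the AMQP-encoded message (body plus envelope and
    # batch overhead), so a one-character body already uses most of the
    # 100-byte limit and the next add is expected to raise ValueError.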
    with pytest.raises(ValueError):
        batch.add(EventData("A"))
Example #25
logger = logging.getLogger("azure")

from miteazure.eventHubs.configuration import EVENT_HUB_CONNECTION_STR, \
                                              EVENT_HUB_PATH

try:
    if not EVENT_HUB_CONNECTION_STR:
        raise ValueError("No EventHubs CONNECTION STRING supplied.")

    # Create Event Hubs client
    client = EventHubClient.from_connection_string(conn_str=EVENT_HUB_CONNECTION_STR, event_hub_path=EVENT_HUB_PATH, debug=False)
    producer = client.create_producer(partition_id="0")
    try:
        start_time = time.time()
        for i in range(3):
            print("Sending message: {}".format(i))
            message = "Message {}".format(i)
            producer.send(EventData(message))
            print(f"Message [[[{message}]]] sent.")
    except:
        raise
    finally:
        end_time = time.time()
        run_time = end_time - start_time
        print(f"Runtime: {run_time} seconds")
        producer.close()
        client.close()

except KeyboardInterrupt:
    pass
def test_app_properties():
    app_props = {"a": "b"}
    event_data = EventData("")
    event_data.properties = app_props
    assert event_data.properties["a"] == "b"
def test_body_wrong_json():
    event_data = EventData('aaa')
    with pytest.raises(TypeError):
        event_data.body_as_json()
def test_body_json():
    event_data = EventData('{"a":"b"}')
    jo = event_data.body_as_json()
    assert jo["a"] == "b"
Example #29
        return Input_list


    try:
        start_time = time.time()
        Input_list = []
        Valid_Input = validateInput(Input_list)
        
        #Grabbing Data from API
        for ifsc in Valid_Input:
            main_api = f'https://ifsc.razorpay.com/{ifsc}'
            json_data = requests.get(main_api).json()
            print("Sending message: {}".format(ifsc))
            
            #creates the message
            message = "BANK:- " + json_data["BANK"] + " , BRANCH:- " + json_data["BRANCH"] + " , CITY:- " + \
                      json_data["CITY"] + " , STATE:-" + json_data["STATE"]
            sender.send(EventData(message))


    except:
        raise
    finally:
        end_time = time.time()
        client.stop()
        run_time = end_time - start_time
        logger.info("Runtime: {} seconds".format(run_time))

except KeyboardInterrupt:
    pass
def test_example_eventhub_sync_send_and_receive(live_eventhub_config):
    # [START create_eventhub_client_connstr]
    import os
    from azure.eventhub import EventHubClient

    connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format(
        os.environ['EVENT_HUB_HOSTNAME'],
        os.environ['EVENT_HUB_SAS_POLICY'],
        os.environ['EVENT_HUB_SAS_KEY'],
        os.environ['EVENT_HUB_NAME'])
    client = EventHubClient.from_connection_string(connection_str)
    # [END create_eventhub_client_connstr]

    from azure.eventhub import EventData, EventPosition

    # [START create_eventhub_client_sender]
    client = EventHubClient.from_connection_string(connection_str)
    # Create a producer.
    producer = client.create_producer(partition_id="0")
    # [END create_eventhub_client_sender]

    # [START create_eventhub_client_receiver]
    client = EventHubClient.from_connection_string(connection_str)
    # Create a consumer.
    consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'))
    # Create an exclusive consumer object.
    exclusive_consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), owner_level=1)
    # [END create_eventhub_client_receiver]

    client = EventHubClient.from_connection_string(connection_str)
    producer = client.create_producer(partition_id="0")
    consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'))
    try:
        consumer.receive(timeout=1)

        # [START create_event_data]
        event_data = EventData("String data")
        event_data = EventData(b"Bytes data")
        event_data = EventData([b"A", b"B", b"C"])

        list_data = ['Message {}'.format(i) for i in range(10)]
        event_data = EventData(body=list_data)
        # [END create_event_data]

        # [START eventhub_client_sync_create_batch]
        event_data_batch = producer.create_batch(max_size=10000)
        while True:
            try:
                event_data_batch.try_add(EventData('Message inside EventBatchData'))
            except ValueError:
                # The EventDataBatch object reaches its max_size.
                # You can send the full EventDataBatch object and create a new one here.
                break
        # [END eventhub_client_sync_create_batch]

        # [START eventhub_client_sync_send]
        with producer:
            event_data = EventData(b"A single event")
            producer.send(event_data)
        # [END eventhub_client_sync_send]
        time.sleep(1)

        # [START eventhub_client_sync_receive]
        with consumer:
            logger = logging.getLogger("azure.eventhub")
            received = consumer.receive(timeout=5, max_batch_size=1)
            for event_data in received:
                logger.info("Message received:{}".format(event_data.body_as_str()))
        # [END eventhub_client_sync_receive]
            assert len(received) > 0
            assert received[0].body_as_str() == "A single event"
            assert list(received[-1].body)[0] == b"A single event"
    finally:
        pass