Example #1
def add_receiver(client: EventHubClient, offset: Offset):
    receiver = client.add_receiver(CONSUMER_GROUP,
                                   EVENT_HUB_PARTITION,
                                   prefetch=PREFETCH,
                                   offset=offset)
    client.run()
    return receiver
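The helper only registers the receiver and opens the client; it does not pull events itself. A minimal usage sketch (the environment-variable names follow Example #15; CONSUMER_GROUP, EVENT_HUB_PARTITION, and PREFETCH are assumed module-level constants):

import os
from azure.eventhub import EventHubClient, Offset

# Assumed placeholders, mirroring the environment-variable pattern in Example #15.
ADDRESS = os.environ.get('EVENT_HUB_ADDRESS')
USER = os.environ.get('EVENT_HUB_SAS_POLICY')
KEY = os.environ.get('EVENT_HUB_SAS_KEY')
CONSUMER_GROUP = "$default"
EVENT_HUB_PARTITION = "0"
PREFETCH = 300

client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY)
receiver = add_receiver(client, Offset("-1"))  # the helper above; "-1" reads from the start
try:
    for event_data in receiver.receive(timeout=100):
        print(event_data.sequence_number, event_data.body_as_str())
finally:
    client.stop()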
Example #2
def get():
    since = request.args.get('since', -1)

    client = EventHubClient(address, debug=False, username=user, password=key)

    receiver = client.add_receiver(consumergroup, PARTITION, prefetch=1000, offset=Offset(since), keep_alive=72000)
    client.run()

    def generate():
        batched_events = receiver.receive(max_batch_size=100, timeout=500)
        yield '['
        index = 0
        while batched_events:
            for event_data in batched_events:
                if index > 0:
                    yield ','
                last_sn = event_data.sequence_number
                data = str(event_data.message)
                output_entity = literal_eval(data)
                output_entity.update({"_updated": str(last_sn)})
                yield json.dumps(output_entity)
                index = index + 1
            batched_events = receiver.receive(max_batch_size=100, timeout=500)
        yield ']'
    return Response(generate(), mimetype='application/json')
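The handler above is meant to be exposed as a Flask view that streams the accumulated events back as one JSON array. A hypothetical wiring sketch (the route path and port are assumptions, not part of the original):

from flask import Flask

app = Flask(__name__)
# Register the get() handler defined above on an assumed route.
app.add_url_rule('/entities', view_func=get, methods=['GET'])

if __name__ == '__main__':
    app.run(port=5000)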
Example #3
    def cosmosDBServiceToCosmosDB(self):
        
        database_link = 'dbs/' + DATABASE_ID
        collection_link = database_link + '/colls/' + COLLECTION_ID

        counter = 0
        filepath = ''

        CONSUMER_GROUP = "$Default"
        OFFSET = Offset("0")
        PARTITION = "0"
        eh_client = EventHubClient('amqps://xxxxx.servicebus.windows.net/txxxxqueue', 
                                   debug=True, 
                                   username='******', 
                                   password='******')
        receiver = eh_client.add_receiver(CONSUMER_GROUP, PARTITION, 
                                          prefetch=300, offset=OFFSET)
        try:    
            eh_client.run()
            while True:
                batch = receiver.receive(timeout=100)
                if not batch:
                    break
                for event_data in batch:
                    rcv_msg = str(event_data.message)
                    # Filter out null/empty messages
                    if len(rcv_msg) > 5:
                        # Load the message into Cosmos DB
                        cosmos_client.CreateDocument(collection_link,
                                                     json.loads(rcv_msg))

            eh_client.stop()
        except Exception as e:
            print("Failed Receiving Record {}".format(str(e)) ) 
        finally:
            eh_client.stop()
Example #4
class Consumer:
    consumer_group = None
    eventhubs_client = None
    offset = Offset("-1")
    redis_cache = None

    def __init__(self, eventhub, address, user, key, consumer_group,
                 redis_hostname, redis_key):
        self.consumer_group = consumer_group
        self.eventhubs_client = EventHubClient(address,
                                               debug=False,
                                               username=user,
                                               password=key)

        redis_topic = f"eventhubs-{eventhub}-{consumer_group}"
        self.redis_cache = RedisCache(redis_hostname, redis_key, redis_topic)

    def recieve(self):
        OFFSET = Offset(self.redis_cache.get_offset())
        receiver = self.eventhubs_client.add_receiver(self.consumer_group,
                                                      "0",
                                                      prefetch=5000,
                                                      offset=OFFSET)
        self.eventhubs_client.run()
        messages = receiver.receive(timeout=100)
        self.eventhubs_client.stop()
        return messages

    def commit(self, event_data):
        self.redis_cache.set_offset(event_data.sequence_number)
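A minimal usage sketch of the class above; every connection value below is an assumed placeholder, and process() stands in for whatever handles each event:

# Sketch only: all connection values are assumed placeholders.
consumer = Consumer(eventhub="myhub",
                    address="amqps://myns.servicebus.windows.net/myhub",
                    user="RootManageSharedAccessKey",
                    key="<sas-key>",
                    consumer_group="$default",
                    redis_hostname="mycache.redis.cache.windows.net",
                    redis_key="<redis-key>")

messages = consumer.recieve()              # reads from the offset stored in Redis
for event_data in messages:
    process(event_data.body_as_str())      # process() is a hypothetical handler
if messages:
    consumer.commit(messages[-1])          # persist the last sequence number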
Example #5
def isNewLabeledData(eh_url, eh_offset_url, eh_account, eh_key):
    '''
    Examines the EventHub to determine whether enough new training data is available to trigger a re-train
    ''' 
    
    CONSUMER_GROUP = "$default"
    PARTITION = "0"
    
    offset_client = EventHubClient(eh_offset_url, debug=False, username=eh_account, password=eh_key)
    offset_receiver = offset_client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000)
    offset_sender = offset_client.add_sender(partition="0")
    offset_client.run()

    #Retrieves the current offset/sequence number for the write event queue from the dedicated offset queue
    offsets = offset_receiver.receive(timeout=50)
    current_offset = -1 #Default to -1 or reading the entire feed if another offset is not retrieved
    logging.info("{0} write messages recieved".format(len(offsets)))
    for offset in offsets:
        offset_event = json.loads(offset.body_as_str())
        current_offset = offset_event['CURRENT_OFFSET']
        logging.info("Retrieved previous offset event {0}".format(offset_event))
    
    #Use the retrieved offset/sequence number to retrieve new writes
    event_client = EventHubClient(eh_url, debug=False, username=eh_account, password=eh_key)
    receiver = event_client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000, offset=Offset(current_offset))
    event_client.run()
    batch = receiver.receive(timeout=50)
    new_label_count = len(batch)
    for stuff in batch:
        logging.info("Offset {0}".format(stuff.sequence_number))
        current_offset = int(stuff.sequence_number) if int(stuff.sequence_number) > current_offset else current_offset
        logging.info("Message {0}".format(stuff.body_as_str()))
    logging.info("Processed {0} new label writes".format(new_label_count))
    
    #Write the last retrieved offset/sequence number to the offset message queue to be used in the next read
    offset_sender.send(EventData(json.dumps({"TIMESTAMP": datetime.datetime.now().timestamp(), "CURRENT_OFFSET": current_offset})))
    logging.info("Stored current offset event {0}".format(current_offset))
    #sender.send(EventData(json.dumps({"EVENT_TYPE": "LABEL_WRITE", "LABEL_INDEX":face_hash, "WRITE_TIMESTAMP": datetime.datetime.now().timestamp()})))
    
    #Close queue clients
    offset_client.stop()
    event_client.stop()
    
    #Return True if more than 5 new label writes were found (trigger a retrain)
    return new_label_count > 5
Example #6
def test_long_running_receive(connection_str):
    parser = argparse.ArgumentParser()
    parser.add_argument("--duration",
                        help="Duration in seconds of the test",
                        type=int,
                        default=30)
    parser.add_argument("--consumer",
                        help="Consumer group name",
                        default="$default")
    parser.add_argument("--partitions", help="Comma seperated partition IDs")
    parser.add_argument("--offset", help="Starting offset", default="-1")
    parser.add_argument("--conn-str",
                        help="EventHub connection string",
                        default=connection_str)
    parser.add_argument("--eventhub", help="Name of EventHub")
    parser.add_argument("--address", help="Address URI to the EventHub entity")
    parser.add_argument(
        "--sas-policy",
        help="Name of the shared access policy to authenticate with")
    parser.add_argument("--sas-key", help="Shared access key")

    args, _ = parser.parse_known_args()
    if args.conn_str:
        client = EventHubClient.from_connection_string(args.conn_str,
                                                       eventhub=args.eventhub,
                                                       debug=False)
    elif args.address:
        client = EventHubClient(args.address,
                                username=args.sas_policy,
                                password=args.sas_key)
    else:
        try:
            import pytest
            pytest.skip("Must specify either '--conn-str' or '--address'")
        except ImportError:
            raise ValueError("Must specify either '--conn-str' or '--address'")

    try:
        if not args.partitions:
            partitions = get_partitions(client)
        else:
            partitions = args.partitions.split(",")
        pumps = {}
        for pid in partitions:
            pumps[pid] = client.add_receiver(consumer_group=args.consumer,
                                             partition=pid,
                                             offset=Offset(args.offset),
                                             prefetch=50)
        client.run()
        pump(pumps, args.duration)
    finally:
        client.stop()
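get_partitions() and pump() are not shown in this example. Hypothetical stand-ins, consistent with the client API used elsewhere in these examples, might look like this:

import time

def get_partitions(client):
    # Hypothetical stand-in: list partition IDs via get_eventhub_info(),
    # the same call used in Example #12.
    return client.get_eventhub_info()['partition_ids']

def pump(receivers, duration):
    # Hypothetical stand-in: drain every registered receiver until the
    # requested duration has elapsed.
    deadline = time.time() + duration
    total = 0
    while time.time() < deadline:
        for pid, receiver in receivers.items():
            total += len(receiver.receive(timeout=5))
    print("Received {} events across {} partitions".format(total, len(receivers)))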
Example #7
    def eventhubReceiveToFile(test_queue_url):
        # Receive messages from the Event Hub and append them to a local file,
        # rolling over to a new file (via createfile()) every 50,000 messages.
        counter = 0
        filepath = ''

        CONSUMER_GROUP = "$Default"
        OFFSET = Offset("0")
        PARTITION = "0"

        client = EventHubClient('amqps://xxxxx.servicebus.windows.net/txxxxxqueue', 
                                debug=True, 
                                username='******', 
                                password='******')
        receiver = client.add_receiver(CONSUMER_GROUP, 
                                       PARTITION, prefetch=300, offset=OFFSET)
        try:    
            client.run()
            while True:
                for event_data in receiver.receive(timeout=100):
                    rcv_msg = str(event_data.message)
                    # Skip null/empty messages
                    if len(rcv_msg) >= 5:
                        # Start a new file for the first message and roll over
                        # to a fresh one after every 50,000 messages.
                        if counter == 0 or counter > 50000:
                            filepath = createfile()
                            counter = 0
                        with open(filepath, 'a') as file:
                            file.write(rcv_msg)
                            file.write('\n')
                        counter = counter + 1
        except Exception as e:
            print("Failed Receiving Record {}".format(str(e)) ) 
        finally:
            client.stop()
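createfile() is not shown in the snippet. A hypothetical stand-in that creates a uniquely named dump file and returns its path:

import os
import time

def createfile(directory='.'):
    # Hypothetical helper: touch a uniquely named dump file and return its path.
    path = os.path.join(directory, 'eventhub_dump_{}.txt'.format(int(time.time())))
    open(path, 'w').close()
    return path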
Example #8
def test_long_running_receive(connection_str):
    parser = argparse.ArgumentParser()
    parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30)
    parser.add_argument("--consumer", help="Consumer group name", default="$default")
    parser.add_argument("--partitions", help="Comma seperated partition IDs")
    parser.add_argument("--offset", help="Starting offset", default="-1")
    parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str)
    parser.add_argument("--eventhub", help="Name of EventHub")
    parser.add_argument("--address", help="Address URI to the EventHub entity")
    parser.add_argument("--sas-policy", help="Name of the shared access policy to authenticate with")
    parser.add_argument("--sas-key", help="Shared access key")

    args, _ = parser.parse_known_args()
    if args.conn_str:
        client = EventHubClient.from_connection_string(
            args.conn_str,
            eventhub=args.eventhub, debug=False)
    elif args.address:
        client = EventHubClient(
            args.address,
            username=args.sas_policy,
            password=args.sas_key)
    else:
        try:
            import pytest
            pytest.skip("Must specify either '--conn-str' or '--address'")
        except ImportError:
            raise ValueError("Must specify either '--conn-str' or '--address'")

    try:
        if not args.partitions:
            partitions = get_partitions(client)
        else:
            partitions = args.partitions.split(",")
        pumps = {}
        for pid in partitions:
            pumps[pid] = client.add_receiver(
                consumer_group=args.consumer,
                partition=pid,
                offset=Offset(args.offset),
                prefetch=50)
        client.run()
        pump(pumps, args.duration)
    finally:
        client.stop()
Example #9
class RetrieveEventHub(QThread):
    def __init__(self,output_buffer,ADDRESS,USER,KEY,CONSUMER_GROUP,OFFSET,PARTITION,parent=None):
        super(RetrieveEventHub,self).__init__(parent)

        self.address = ADDRESS

        # SAS policy and key are not required if they are encoded in the URL
        self.user = USER
        self.key = KEY
        self.CONSUMER_GROUP = CONSUMER_GROUP
        self.OFFSET = OFFSET
        self.PARTITION = PARTITION
        self.total = 0
        self.last_sn = -1
        self.last_offset = "-1"
        self.client = EventHubClient(self.address, debug=False, username=self.user, password=self.key)
        self.receiver = self.client.add_receiver(self.CONSUMER_GROUP, self.PARTITION, prefetch=1000,offset=self.OFFSET)
    
        self.output_buffer = output_buffer
        self.last_frame = -1
    def ordered_by_index(self,elem):
        return int(elem[0])
    def run(self):
        self.client.run()
        global exit_value
        while(exit_value.value != 1):
            time.sleep(0.05)
            batched_events = self.receiver.receive(max_batch_size=10)
            contents = []
            for event in batched_events:
                oneLine = str(event.message).strip('&')
                content = oneLine.split('&')
                contents.append(content)
                #self.queue_service.delete_message(self.queue_name, message.id, message.pop_receipt)
            
            contents.sort(key=self.ordered_by_index)
            for content in contents:
                if int(content[0])>self.last_frame:
                    if not self.output_buffer.full():
                        self.output_buffer.put(content)
                    self.last_frame = int(content[0])
Example #10
def inbound_sync_listener():
    """Initialize a delta inbound sync with Azure Active Directory."""
    while True:  # pylint: disable=too-many-nested-blocks
        provider_id = TENANT_ID
        try:
            initial_sync_time = check_last_sync("azure-user", "initial")
            LOGGER.info(initial_sync_time)
            LOGGER.info("This is your initial sync time")
            initial_sync_time = initial_sync_time["timestamp"][:26]
            latest_delta_sync_time = get_last_delta_sync(provider_id, "delta")
            if latest_delta_sync_time:
                latest_delta_sync_time = latest_delta_sync_time[
                    "timestamp"][:26]
                previous_sync_datetime = datetime.strptime(
                    latest_delta_sync_time, "%Y-%m-%dT%H:%M:%S.%f")
            else:
                previous_sync_datetime = datetime.strptime(
                    initial_sync_time, "%Y-%m-%dT%H:%M:%S.%f")
            # Create an eventhub client.
            LOGGER.info(ADDRESS)
            client = EventHubClient(ADDRESS,
                                    debug=False,
                                    username=USER,
                                    password=KEY)
            try:
                LOGGER.info("Opening connection to EventHub...")
                # Set prefetch to 1, we only want one event at a time.
                receiver = client.add_receiver(CONSUMER_GROUP,
                                               PARTITION,
                                               prefetch=1,
                                               offset=OFFSET)
                # Open the connection to the EventHub.
                client.run()
                # Get one event from EventHub.
                batch = receiver.receive(timeout=5000)
                while batch:
                    for event_data in batch:
                        # Get the event as a json record from the batch of events.
                        event_json = event_data.body_as_json()
                        record = event_json["records"][0]
                        operation_name = record["operationName"]
                        time = record["time"][:26]
                        record_timestamp = datetime.strptime(
                            time, "%Y-%m-%dT%H:%M:%S.%f")
                        # Only process events logged after the previous initial/delta sync.
                        # Only grab events concerning User or Group objects.
                        if (operation_name in VALID_OPERATIONS
                                and record_timestamp > previous_sync_datetime):
                            data = {
                                "initated_by":
                                record["properties"]["initiatedBy"],
                                "target_resources":
                                record["properties"]["targetResources"],
                                "operation_name":
                                operation_name,
                                "resultType":
                                record["resultType"],
                            }
                            LOGGER.info("Operation name: %s", operation_name)
                            LOGGER.info("Record to Change: %s", record)
                            record_timestamp_utc = record_timestamp.isoformat()
                            insert_change_to_db(data, record_timestamp_utc)
                            sync_source = "azure-" + VALID_OPERATIONS[
                                operation_name]
                            provider_id = TENANT_ID
                            conn = connect_to_db()
                            save_sync_time(
                                provider_id,
                                sync_source,
                                "delta",
                                conn,
                                record_timestamp_utc,
                            )
                            conn.close()
                            previous_sync_datetime = record_timestamp
                    batch = receiver.receive(timeout=50)
                LOGGER.info("Closing connection to EventHub...")
                # Close the connection to the EventHub.
                client.stop()
            except KeyboardInterrupt:
                pass
            finally:
                client.stop()
        except ExpectedError as err:
            LOGGER.debug(
                "%s Repolling after %s seconds...",
                err,
                LISTENER_POLLING_DELAY,
            )
            time.sleep(LISTENER_POLLING_DELAY)
        except Exception as err:
            LOGGER.exception(err)
            raise err
Example #11
PARTITION = "0"
CONSUMER_GROUP = "$default"


def GetAMQPAddress(hn, ep):
    ep_parts = dict(kv.split("=", 1) for kv in ep.split(";"))
    return "amqps://%s:%s@%s/%s" % (
        urllib.parse.quote(ep_parts["SharedAccessKeyName"], safe=''),
        urllib.parse.quote(ep_parts["SharedAccessKey"], safe=''),
        ep_parts["Endpoint"].replace("sb://", "").replace("/", ""), hn)


client = EventHubClient(GetAMQPAddress(sys.argv[1], sys.argv[2]), debug=False)
try:
    receiver = client.add_receiver(CONSUMER_GROUP,
                                   PARTITION,
                                   prefetch=5000,
                                   offset=OFFSET)
    client.run()
    while True:
        for ev in receiver.receive():
            last_sn = ev.sequence_number
            did = str(ev.device_id, 'utf-8')
            print(ev.offset, ev.sequence_number, did, ev.message)


except KeyboardInterrupt:
    pass
finally:
    client.stop()
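GetAMQPAddress() turns an Event Hub-compatible endpoint connection string and a hub name into an amqps:// URL with the credentials embedded. A quick sketch with hypothetical values:

# Hypothetical values; the endpoint string mirrors the portal's
# "Event Hub-compatible endpoint" format.
hub_name = "myhub"
endpoint = ("Endpoint=sb://myns.servicebus.windows.net/;"
            "SharedAccessKeyName=iothubowner;SharedAccessKey=abc123=")
print(GetAMQPAddress(hub_name, endpoint))
# amqps://iothubowner:abc123%3D@myns.servicebus.windows.net/myhub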
Example #12
class Consumer:
    consumer_group = None
    eventhubs_client = None
    offset = Offset("-1")
    redis_cache = None  # Leaving in here for backward compatibility
    partition_ids = []
    redis_cache_partition_aware = {}

    def __init__(self, eventhub, address, user, key, consumer_group,
                 redis_hostname, redis_key):
        self.consumer_group = consumer_group
        self.eventhubs_client = EventHubClient(address,
                                               debug=False,
                                               username=user,
                                               password=key)

        # Leaving in here for backward compatibility
        redis_topic = f"eventhubs-{eventhub}-{consumer_group}"
        self.redis_cache = RedisCache(redis_hostname, redis_key, redis_topic)

        self.partition_ids = self.eventhubs_client.get_eventhub_info(
        )['partition_ids']

        for partition_id in self.partition_ids:
            redis_topic = f"eventhubs-{eventhub}-{consumer_group}-{partition_id}"
            self.redis_cache_partition_aware[partition_id] = RedisCache(
                redis_hostname, redis_key, redis_topic)

    def recieve(self):
        messages = []
        OFFSET = Offset(self.redis_cache.get_offset())
        receiver = self.eventhubs_client.add_receiver(self.consumer_group,
                                                      "0",
                                                      prefetch=5000,
                                                      offset=OFFSET)
        self.eventhubs_client.run()
        messages = receiver.receive(timeout=100)
        self.eventhubs_client.stop()
        return messages

    def commit(self, event_data):
        self.redis_cache.set_offset(event_data.sequence_number)

    def receive_all_partitions(self):
        messages = []
        for partition_id in self.partition_ids:

            OFFSET = Offset(
                self.redis_cache_partition_aware[partition_id].get_offset())

            receiver = self.eventhubs_client.add_receiver(self.consumer_group,
                                                          partition_id,
                                                          prefetch=5000,
                                                          offset=OFFSET)
            self.eventhubs_client.run()
            for message in receiver.receive(timeout=100):
                messages.append({
                    "message": message,
                    "partition_id": partition_id
                })
        self.eventhubs_client.stop()
        return messages

    def commit_all_partitions(self, event_data):
        self.redis_cache_partition_aware[
            event_data['partition_id']].set_offset(
                event_data['message'].sequence_number)
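A minimal sketch of the partition-aware path; every connection value below is an assumed placeholder, matching the sketch after Example #4:

# Sketch only: all connection values are assumed placeholders.
consumer = Consumer(eventhub="myhub",
                    address="amqps://myns.servicebus.windows.net/myhub",
                    user="RootManageSharedAccessKey",
                    key="<sas-key>",
                    consumer_group="$default",
                    redis_hostname="mycache.redis.cache.windows.net",
                    redis_key="<redis-key>")

for item in consumer.receive_all_partitions():
    print(item["partition_id"], item["message"].body_as_str())
    # Persist the per-partition offset once the event has been handled.
    consumer.commit_all_partitions(item)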
Example #13
CONSUMER_GROUP = "$default"
OFFSET = Offset("-1")
PARTITION = "0"

##COUNTER
total = 0
"""
PROCESS DATA
"""

##SET CLIENT
client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY)

##PROCESS QUEUED DATA
try:
    receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000)
    client.run()
    start_time = time.time()
    for event_data in receiver.receive(timeout=100):
        jsonstring = event_data.body_as_json()
        devicename = jsonstring['device']
        reading = jsonstring['reading']
        print(f"Device: {devicename} | Reading: {reading}")
        total += 1

    end_time = time.time()
    client.stop()
    run_time = end_time - start_time
    print("Received {} messages in {} seconds".format(total, run_time))

except KeyboardInterrupt:
Example #14
    address     = The URL used to connect to the Event Hub
    username    = The name of the SAS Policy
    password    = The password (key) of the SAS Policy
    debug       = Whether to debug the connection
"""
EH_Client = EventHubClient(EH_Address, EH_SASName, EH_PrimaryKey, debug=False)

# Connect and receive the events, or catch any possible error
try:
    """
    Creates a Receiver object.
    Parameters:
        consumer_group      = Name of the consumer group
        partition           = ID of the partition
    """
    EH_Receiver = EH_Client.add_receiver(EH_ConsumerGruop, EH_Partition)

    # Start the client
    EH_Client.run()
    # Record the start time
    start_time = time.time()

    # Get the first batch of events
    evBatch = EH_Receiver.receive(timeout=1000)

    # While the batch contains events
    while evBatch:

        # For each event...
        for Event in evBatch:
Example #15
ADDRESS = os.environ.get('EVENT_HUB_ADDRESS')

# SAS policy and key are not required if they are encoded in the URL
USER = os.environ.get('EVENT_HUB_SAS_POLICY')
KEY = os.environ.get('EVENT_HUB_SAS_KEY')
CONSUMER_GROUP = "$default"
OFFSET = Offset("-1")
PARTITION = "0"


total = 0
last_sn = -1
last_offset = "-1"
client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY)
try:
    receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=100, offset=OFFSET)
    client.run()
    batched_events = receiver.receive(max_batch_size=10)
    for event_data in batched_events:
        last_offset = event_data.offset.value
        last_sn = event_data.sequence_number
        total += 1
        print("Partition {}, Received {}, sn={} offset={}".format(
            PARTITION,
            total,
            last_sn,
            last_offset))

except KeyboardInterrupt:
    pass
finally: