Example #1
def target_topic_consumer(unittest_config: Config, target_topic: Tuple[str, int]) -> Iterator[Consumer]:
    consumer = Consumer(
        {
            "group.id": "asdf",
            "enable.auto.commit": False,
            "enable.partition.eof": False,
            **unittest_config.create_confluent_config(),
        }
    )
    consumer.assign([TopicPartition(topic=target_topic[0], partition=i, offset=0) for i in range(target_topic[1])])
    yield consumer
    consumer.close()
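A minimal sketch of how this generator-style fixture might be consumed, assuming it is registered with @pytest.fixture and that unittest_config and target_topic are fixtures defined elsewhere in the suite (the test body below is hypothetical):

def test_target_topic_is_readable(target_topic_consumer: Consumer) -> None:
    # Poll once; with the offset=0 assignments above, the first message (if any) arrives here.
    msg = target_topic_consumer.poll(timeout=5.0)
    assert msg is None or not msg.error()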
Example #2
    def consumer_factory_(topic: str) -> Consumer:
        consumer = Consumer({
            "group.id": "asdf",
            "enable.auto.commit": False,
            "enable.partition.eof": False,
            **unittest_config.create_confluent_config(),
        })
        # Discover every partition of the topic so they can all be assigned below.
        partitions = consumer.list_topics(topic=topic).topics[topic].partitions

        consumer.assign([
            TopicPartition(topic=topic, partition=p, offset=0)
            for p in partitions
        ])
        consumers.append(consumer)
        return consumer
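Here consumers and unittest_config are closure variables of an enclosing fixture that is not shown. A plausible enclosing fixture, sketched under that assumption (names other than those used above are hypothetical):

import pytest
from typing import Callable, Iterator, List

@pytest.fixture
def consumer_factory(unittest_config: Config) -> Iterator[Callable[[str], Consumer]]:
    consumers: List[Consumer] = []

    def consumer_factory_(topic: str) -> Consumer:
        ...  # body as in the example above

    yield consumer_factory_
    # Teardown: close every consumer the factory created during the test.
    for consumer in consumers:
        consumer.close()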
Example #3
def create_consumers(args, num_partitions, partition_table):
    consumers = []
    transactional = args["transactional"]
    for i in range(num_partitions):
        partition_table[i] = []
        # One consumer per output partition; a fresh group.id avoids interfering
        # with committed offsets from earlier runs.
        oc = Consumer({
            'bootstrap.servers': args["kafka"],
            'group.id': str(uuid.uuid4()),
            'auto.offset.reset': 'latest',
            'api.version.request': True,
            'isolation.level': ('read_committed' if transactional else 'read_uncommitted'),
            'max.poll.interval.ms': 86400000
        })
        oc.assign([TopicPartition(args["output_topic"], i)])
        # An initial poll lets the consumer complete the assignment before use.
        oc.poll(0.5)
        consumers.append(oc)
    return consumers
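A hypothetical drain loop for the consumers returned above, collecting each output partition's messages into partition_table (the timeout and message handling are illustrative):

consumers = create_consumers(args, num_partitions, partition_table)
for i, consumer in enumerate(consumers):
    while True:
        msg = consumer.poll(1.0)
        if msg is None:
            break  # nothing within the timeout; treat the partition as drained
        if not msg.error():
            partition_table[i].append(msg.value())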
Example #4
def main():
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Reidentification demo using any number of cameras:
           Each camera can be used for registration only, for reidentification only, or for both.
           
           Plays a video from a jpeg topic,
           visualizes head detection with a gray bounding box around a head.
           When a detection is identified, changes the bounding box color to orange
           and writes the dwell time, age and ID (derived from the reid MS ID) above the heads.
           
           Displays ('-d') or stores ('-o') the result of this demo in kafka topics.

           Required topics (example):
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.frameinfo.FrameInfoRecord.json
           - <prefix>.cam.0.ages.AgeRecord.json
           - <prefix>.cam.1.original.Image.jpg
           - <prefix>.cam.1.dets.ObjectDetectionRecord.json
           - <prefix>.cam.1.frameinfo.FrameInfoRecord.json
           - <prefix>.cam.1.ages.AgeRecord.json
           ...
           - <prefix>.cam.1.reids.ReidRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker",
                        help="The name of the kafka broker.",
                        type=str)
    parser.add_argument("prefix",
                        help="Prefix of topics (base|skeleton).",
                        type=str)
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o',
                        '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    parser.add_argument('text',
                        help='Text to display (age|dwell_time|both).',
                        type=str)
    args = parser.parse_args()

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )

    # The producer is needed both for the optional image output ('-o') and for
    # updating the technical naming topic below, so create it unconditionally.
    producer = Producer({'bootstrap.servers': args.broker})

    overlay = cv2.imread('resources/powered_by_white.png',
                         cv2.IMREAD_UNCHANGED)

    # Prepare the topics to read
    input_topics = [
        f"{args.prefix}.cam.{id}.{topic_postfix}" for id in CAMERA_TOPIC_IDS
        for topic_postfix in TOPIC_POSTFIXES
    ]
    reid_topics = [
        f"{args.prefix}.cam.{id}.{topic_postfix}" for id in REID_TOPIC_IDS
        for topic_postfix in REID_TOPIC_POSTFIXES
    ]
    consumable_topics = list(map(TopicInfo, input_topics)) \
                        + (list(map(lambda t: TopicInfo(t, drop=False), reid_topics)))

    # TODO (when names via person stream): Remove this consumer
    reg_consumer = Consumer({
        'bootstrap.servers': args.broker,
        'group.id': 'multicamreid_reg',
        'auto.offset.reset': 'earliest'
    })
    reg_consumer.assign(
        [TopicPartition(topic="named.records.json", partition=0, offset=0)])

    output_topics = dict((id, f"{args.prefix}.cam.{id}.{OUTPUT_TOPIC_POSTFIX}")
                         for id in CAMERA_TOPIC_IDS)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(broker=args.broker,
                                               groupid="detection",
                                               topics_infos=consumable_topics,
                                               latency_ms=200,
                                               commit_interval_sec=None,
                                               group_by_time=True)

    registrations: Dict[str, Registration] = {}
    i = 0
    inner_id = 0
    scaling = 1.0
    for msgs in consumer.getMessages():
        k = -1
        for time, v in message_list_to_frame_structure(msgs).items():
            message = v.get(args.prefix, {})

            # Collect Reid records
            reid_records = {}
            for reid_id in REID_TOPIC_IDS:
                reid_message = message.get(reid_id, {})
                reid_records.update(reid_message.get("reid", {}))

            # Process the image
            for topic_key, topic_message in filter(
                    lambda t: t[0] not in REID_TOPIC_IDS, message.items()):
                img = topic_message.get("image", {})
                if not isinstance(img, np.ndarray):
                    continue
                head_detections = topic_message.get("head_detection", {})
                # Set the image scale
                shape_orig = head_detections.pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]

                # Processing the detections of the image
                for detection_key, detection_record in head_detections.items():
                    object_detection_record = detection_record.get(
                        "bounding_box", {})
                    if not object_detection_record:
                        continue
                    key_to_display = ""
                    color = COLOR_DARK_GREY

                    face_detection = detection_record.get("unknown", {})
                    if face_detection:
                        color = COLOR_LIGHT_GREY

                    age = None
                    age_detection_record = detection_record.get("age", {})
                    if age_detection_record:
                        age = age_detection_record["age"]
                    if args.text == "age" or args.text == "both":
                        key_to_display = f"Age: {age}" if age else ""

                    # Reidentification received for the detection
                    reid_records_for_det = reid_records.get(detection_key, {})
                    if reid_records_for_det:
                        for reid_record in filter(lambda r: "reid_event" in r,
                                                  reid_records_for_det):
                            # We only use the first [0] identified face now
                            reid_key = reid_record["reid_event"]["match_list"][
                                0]["id"]["first_detection_key"]
                            registered = registrations.get(reid_key, None)
                            if registered:
                                age_to_display = ""
                                if age:
                                    registered.addAge(age)
                                if args.text == "age" or args.text == "both":
                                    age_to_display = f"; Age: {registered.age:d}" if age else ""
                                # Calculate the dwell time if required
                                dwell_time_display = ""
                                if args.text == "dwell_time" or args.text == "both":
                                    detection_time = reid_record["reid_event"][
                                        "match_list"][0]["id"][
                                            "first_detection_time"]
                                    dwell_time = time - int(detection_time)
                                    dwell_time_display = f"; Dwell time: {dwell_time}ms"
                                color = COLOR_ORANGE
                                name_to_display = registered.name if registered.name else f"ID: {registered.id}"
                                key_to_display = f"{name_to_display}{age_to_display}{dwell_time_display}"

                            else:
                                inner_id += 1
                                registrations[reid_key] = Registration(
                                    id=inner_id)
                                if age:
                                    registrations[reid_key].addAge(age)

                                # Update the technical naming topic
                                #  TODO (when names via person stream): remove
                                producer.produce(
                                    "detected.records.json",
                                    key=str(reid_key).encode("utf-8"),
                                    value=(str(inner_id) +
                                           ";").encode("utf-8"),
                                    timestamp=time)

                    # Read the technical naming topic
                    #  TODO (when names via person stream): remove
                    reg_msg = reg_consumer.poll(0.01)
                    if reg_msg is not None:
                        try:
                            key = reg_msg.key().decode("utf-8")
                            name = reg_msg.value().decode("utf-8")
                            # Update the person name
                            reg_to_update = registrations.get(key)
                            if reg_to_update:
                                reg_to_update.addName(name)
                            else:
                                registrations[key] = Registration(name=name)
                        except Exception:
                            print(
                                "Decoding an entry of the named.records topic failed.",
                                flush=True)

                    # draw text above bounding box
                    img = draw_nice_text(
                        canvas=img,
                        text=key_to_display,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scale=scaling)

                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)

                # draw ultinous logo
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT,
                                   scale=scaling)

                # produce output topic
                if args.output:
                    out_topic = output_topics.get(topic_key)
                    producer.produce(out_topic,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 1000 == 0:
                        producer.flush()
                    i += 1

                # display #
                if args.display:
                    cv2.imshow(f"DEMO Camera {topic_key}", img)
                    k = cv2.waitKey(33)

        if k == ord('q'):  # press 'q' to stop
            break
        elif k == -1:  # waitKey returns -1 when no key was pressed
            continue
        else:
            print("Press the 'q' key to exit!")
Example #5
class TimeOrderedGeneratorWithTimeout(GeneratorInterface):
    """
    A general generator which can read multiple topics and merge their messages in time order.
    A message is emitted once latency_ms has elapsed since its arrival (system time).
    In batch mode (until the first end-of-partition is reached on each stream) the generator will not discard any messages.
    """
    def __init__(self,
                 broker,
                 groupid,
                 topics_infos: List[TopicInfo],
                 latency_ms,
                 commit_interval_sec=None,
                 group_by_time=False,
                 begin_timestamp=None,
                 begin_flag=None,
                 end_timestamp=None,
                 end_flag=None,
                 heartbeat_interval_ms=-1):
        """
        :param broker: Broker to connect to.
        :param groupid: Group id of the consumer.
        :param topics_infos: [TopicInfo()] - list of TopicInfo objects.
        :param latency_ms: (integer >= 0) Latency to wait before serving a message.
                            After this, messages with lower or equal timestamps will be discarded.
        :param commit_interval_sec: How many seconds to wait between commits. None does not commit with the given group id.
        :param group_by_time: Group messages with the same timestamp. This will yield a list of messages.
        :param begin_timestamp: Timestamp of the kafka messages where the generator will start.
        :param begin_flag: BEGINNING, CONTINUE, LIVE - CONTINUE will continue from the last committed offset.
                            If there was no committed offset, it will start from the end of the stream.
        :param end_timestamp: Timestamp where to end the reading.
        :param end_flag: NEVER, END_OF_PARTITION
        :param heartbeat_interval_ms: -1 produces no heartbeats. Otherwise, after every interval a HeartBeat-typed
                                        message with the current timestamp is produced.
        """
        if begin_timestamp is not None and begin_flag is not None:
            raise Exception(
                'You cannot set both the begin timestamp and a begin flag at the same time.'
            )
        if end_timestamp is not None and end_flag is not None:
            raise Exception(
                'You cannot set both the end timestamp and an end flag at the same time.'
            )
        if begin_timestamp is not None and end_timestamp is not None and begin_timestamp >= end_timestamp:
            raise Exception(
                'The begin timestamp is larger than the end timestamp.')
        if begin_flag is not None and end_flag is not None and \
                begin_flag == BeginFlag.LIVE and end_flag == EndFlag.END_OF_PARTITION:
            raise Exception(
                'You cannot start live and process until the end of the streams.'
            )
        if end_flag is not None and not (end_flag == EndFlag.END_OF_PARTITION
                                         or end_flag == EndFlag.NEVER):
            raise Exception(
                'Unknown end flag: {}. Please use the EndFlag enum to provide a proper end flag.'
                .format(end_flag))
        self.end_ts = end_timestamp
        self.end_flag = end_flag
        self.commit_interval_sec = commit_interval_sec
        self.latency_ms = latency_ms
        self.group_by_time = group_by_time
        self.max_poll_interval_ms = 5 * 60 * 1000
        self.consumer = Consumer({
            'bootstrap.servers': broker,
            'group.id': groupid,
            'enable.auto.commit': False,
            'auto.offset.reset': ('earliest' if begin_flag == BeginFlag.CONTINUE_OR_BEGINNING
                                  else 'latest'),
            'fetch.wait.max.ms': 20,
            'max.poll.interval.ms': self.max_poll_interval_ms,
            'enable.partition.eof': True
        })
        self.last_poll = None

        self.tps = []
        self.queues = {}
        self.messages_to_be_committed = {}
        self.begin_timestamp = begin_timestamp
        for ti in topics_infos:
            topic_name = ti.topic
            self.messages_to_be_committed[topic_name] = {
                'last_msg': None,
                'committed': True
            }
            if begin_timestamp is not None:
                self.tps.extend(
                    self.consumer.offsets_for_times([
                        TopicPartition(topic_name,
                                       partition=ti.partition,
                                       offset=begin_timestamp)
                    ]))
            elif begin_flag is not None:
                if begin_flag == BeginFlag.BEGINNING:
                    self.tps.append(
                        TopicPartition(topic_name,
                                       partition=ti.partition,
                                       offset=OFFSET_BEGINNING))
                elif begin_flag in (BeginFlag.CONTINUE,
                                    BeginFlag.CONTINUE_OR_BEGINNING):
                    self.tps.append(
                        TopicPartition(topic_name,
                                       partition=ti.partition,
                                       offset=OFFSET_STORED))
                elif begin_flag == BeginFlag.LIVE:
                    self.tps.append(
                        TopicPartition(topic_name,
                                       partition=ti.partition,
                                       offset=OFFSET_END))
                else:
                    raise Exception(
                        'Unknown begin flag. Please use the BeginFlag enum to provide a proper begin flag.'
                    )
            else:
                self.tps.append(
                    TopicPartition(topic_name,
                                   partition=ti.partition,
                                   offset=OFFSET_END))
            end_offset = None
            if end_flag is not None and end_flag == EndFlag.END_OF_PARTITION:
                end_offset = self.consumer.get_watermark_offsets(
                    TopicPartition(topic_name, ti.partition))[1] - 1
            if end_offset is None or end_offset >= 0:
                self.queues[topic_name] = Topic(topic_name,
                                                self.consumer,
                                                end_offset=end_offset,
                                                partition=ti.partition,
                                                drop=ti.drop)
        self.consumer.assign(self.tps)
        self.last_commit = time.time()
        self.running = True
        self.heartbeat_interval_ms = heartbeat_interval_ms
        self.next_hb = None

    def stopGenerator(self):
        self.running = False

    def _serve_messages(self, message_to_serve):
        if self.commit_interval_sec is not None and self.group_by_time:
            for msg in message_to_serve:
                self.messages_to_be_committed[msg.topic()]['last_msg'] = msg
                self.messages_to_be_committed[msg.topic()]['committed'] = False

        # serve messages
        if self.group_by_time:
            yield message_to_serve
        else:
            for msg in message_to_serve:
                self.messages_to_be_committed[msg.topic()]['last_msg'] = msg
                self.messages_to_be_committed[msg.topic()]['committed'] = False
                yield msg
                if not self.running:
                    break

        # commit messages when they were delivered
        current_time = time.time()
        if self.commit_interval_sec is not None and (
                current_time - self.last_commit) > self.commit_interval_sec:
            for k in self.messages_to_be_committed.keys():
                if not self.messages_to_be_committed[k]['committed']:
                    self.consumer.commit(
                        self.messages_to_be_committed[k]['last_msg'])
                    self.messages_to_be_committed[k]['committed'] = True
            self.last_commit = current_time

    def _serve_heartbeat(self, current_timestamp_ms):
        if self.next_hb is None:
            if self.begin_timestamp is not None:
                self.next_hb = self.begin_timestamp
            else:
                self.next_hb = current_timestamp_ms
        while self.next_hb <= current_timestamp_ms:
            yield HeartBeat(self.next_hb)
            self.next_hb += self.heartbeat_interval_ms

    def _can_serve(self):
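        # Find the earliest event timestamp among the heads of the non-empty
        # queues; serve it only if every queue can emit at that timestamp and
        # at least one head message has waited out the latency_ms window.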
        min_ets = min([
            q.queue[0].message.timestamp()[1]
            for q in self.queues.values() if len(q.queue) > 0
        ],
                      default=-1)
        if min_ets == -1:
            return None
        deadline = getSystemTimestamp() - self.latency_ms
        if all([q.can_be_emitted(min_ets) for q in self.queues.values()]) and \
                any([q.queue[0].ts < deadline for q in self.queues.values()
                     if len(q.queue) > 0 and q.queue[0].message.timestamp()[1] == min_ets]):
            return min_ets
        else:
            return None

    def getMessages(self):
        while self.running:
            if all([v.stopped for v in self.queues.values()]):
                message_to_serve = []
                for q in self.queues.values():
                    message_to_serve.extend(q.queue)
                message_to_serve = [m.message for m in message_to_serve]
                message_to_serve.sort(key=lambda x: x.timestamp()[1])
                while len(message_to_serve) > 0:
                    ts = message_to_serve[0].timestamp()[1]
                    serve_it = []
                    while len(message_to_serve) > 0 and message_to_serve[0].timestamp()[1] == ts:
                        serve_it.append(message_to_serve.pop(0))
                    if self.heartbeat_interval_ms != -1:
                        yield from self._serve_heartbeat(ts)
                    yield from self._serve_messages(serve_it)
                logging.info('Exiting from generator.')
                break
            self.last_poll = getSystemTimestamp()
            msg = self.consumer.poll(0.001)
            if msg is not None:
                if msg.error():
                    if msg.error().code() == KafkaError._PARTITION_EOF:
                        if msg.topic() in self.queues:
                            self.queues[msg.topic()].first_eop_reached = True
                            self.queues[msg.topic()].end_of_partition = True
                    else:
                        logging.error('Unhandled error: {}'.format(msg.error()))
                        break
                else:
                    self.queues[msg.topic()].end_of_partition = False
                    if self.end_ts is not None and msg.timestamp()[1] > self.end_ts:
                        self.queues[msg.topic()].stop_topic()
                    else:
                        self.queues[msg.topic()].add_message(msg)
            while self.running:
                event_ts_to_serve = self._can_serve()
                if event_ts_to_serve is None or \
                        self.max_poll_interval_ms - (getSystemTimestamp() - self.last_poll) < 30000:
                    if self.end_flag == EndFlag.NEVER and self.heartbeat_interval_ms != -1 \
                            and any([q.end_of_partition for q in self.queues.values()]):
                        if self.next_hb is None:
                            self.next_hb = min(
                                getSystemTimestamp() - self.latency_ms,
                                min([
                                    q.queue[0].message.timestamp()[1]
                                    for q in self.queues.values()
                                    if len(q.queue) > 0
                                ],
                                    default=sys.maxsize))
                        if self.next_hb < min(
                                getSystemTimestamp() - self.latency_ms,
                                min([
                                    q.queue[0].message.timestamp()[1]
                                    for q in self.queues.values()
                                    if len(q.queue) > 0
                                ],
                                    default=sys.maxsize)):
                            yield from self._serve_heartbeat(self.next_hb)
                    break
                if self.heartbeat_interval_ms != -1:
                    yield from self._serve_heartbeat(event_ts_to_serve)
                message_to_serve = []
                for q in self.queues.values():
                    message_to_serve.extend(q.get_messages(event_ts_to_serve))
                yield from self._serve_messages(message_to_serve)
                if self.end_ts is not None and self.end_ts <= event_ts_to_serve:
                    self.running = False
        self.consumer.close()
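A minimal usage sketch for the generator above, assuming the TopicInfo helper and enums shown in these examples; the broker address and topic names are placeholders:

topics = [TopicInfo("demo.cam.0.dets.ObjectDetectionRecord.json"),
          TopicInfo("demo.cam.0.ages.AgeRecord.json")]
generator = TimeOrderedGeneratorWithTimeout(broker="localhost:9092",
                                            groupid="time-ordered-demo",
                                            topics_infos=topics,
                                            latency_ms=200,
                                            commit_interval_sec=5,
                                            group_by_time=True)
for batch in generator.getMessages():
    # With group_by_time=True, each yield is a list of messages sharing one event timestamp.
    for msg in batch:
        print(msg.topic(), msg.timestamp()[1])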
Example #6
def compute_achieved_throughput(broker, partitions_with_offsets, result_dict):
    # Do not rebind partitions_with_offsets to a new dict here: the caller's
    # dict supplies the positions to resume from and receives the new ones.
    input_consumer = Consumer({
        'bootstrap.servers': broker,
        'group.id': str(uuid.uuid4()),
        # 'group.id': 'achieved_throughput_measurer',
        'auto.offset.reset': 'earliest',
        'enable.auto.commit': True,
        'auto.commit.interval.ms': 1000,
        'api.version.request': True,
        'max.poll.interval.ms': 60000
    })

    output_consumer = Consumer({
        'bootstrap.servers': broker,
        'group.id': str(uuid.uuid4()),
        # 'group.id': 'achieved_throughput_measurer',
        'auto.offset.reset': 'earliest',
        'enable.auto.commit': True,
        'auto.commit.interval.ms': 1000,
        'api.version.request': True,
        'max.poll.interval.ms': 60000
    })

    if 'input' in partitions_with_offsets and len(
            partitions_with_offsets['input']) > 0:
        input_consumer.assign(partitions_with_offsets['input'])
    else:
        input_consumer.subscribe(['read', 'update', 'transfer'])

    if 'output' in partitions_with_offsets and len(
            partitions_with_offsets['output']) > 0:
        output_consumer.assign(partitions_with_offsets['output'])
    else:
        output_consumer.subscribe(['responses'])

    while True:
        msgs = input_consumer.consume(timeout=5, num_messages=500)
        if len(msgs) == 0:
            break
        for msg in msgs:
            try:
                wrapped = Wrapper()
                wrapped.ParseFromString(msg.value())

                result = {}
                result['operation'] = msg.topic()
                result['input_time'] = msg.timestamp()[1]
                result_dict[wrapped.request_id] = result
            except DecodeError:
                print("Failed to decode the message value; skipping it.")

    partitions_with_offsets['input'] = input_consumer.position(
        input_consumer.assignment())
    input_consumer.close()

    total_messages = 0
    start_time = 0
    end_time = 0
    first = True

    while True:
        msgs = output_consumer.consume(timeout=5, num_messages=500)
        if len(msgs) == 0:
            break
        for msg in msgs:
            response = Response()
            response.ParseFromString(msg.value())
            key = response.request_id
            status_code = response.status_code
            if key in result_dict:
                if first:
                    start_time = msg.timestamp()[1] / 1000
                    first = False
                total_messages += 1
                end_time = msg.timestamp()[1] / 1000
                result_dict[key]['output_time'] = msg.timestamp()[1]
                result_dict[key]['status_code'] = status_code

    partitions_with_offsets['output'] = output_consumer.position(
        output_consumer.assignment())
    output_consumer.close()

    print("Total messages considered: " + str(total_messages))

    if total_messages == 0 or end_time - start_time == 0:
        return 0

    return total_messages / (end_time - start_time)
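A hypothetical driver for the function above: the offsets dict carries consumer positions between calls, so repeated measurements resume where the previous one stopped.

offsets = {}
results = {}
throughput = compute_achieved_throughput("localhost:9092", offsets, results)
print("Achieved throughput: {:.1f} msg/s".format(throughput))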