# Example #1
def __push_events_to_kafka(topic, events):
    """Push a batch of events to the given Kafka topic.

    Returns a Connexion-style ``(body, status)`` pair:
      * 401 if the caller's uid is not whitelisted for posting,
      * 422 if the topic does not exist,
      * 503 if at least one event could not be written,
      * 201 with an empty body on full success.
    """
    call_start = datetime.datetime.now()

    # Resolve the caller's uid once; it is needed for both the auth
    # check and the metrics call below.
    uid = __get_uid()
    if uid not in config.UIDS_TO_POST_EVENT:
        logging.info('[#OAUTH_401] Received uuid is not valid for posting: %s', flask.request.token_info.get("uid"))
        return {'detail': 'Not Authorized. You are not allowed to use this endpoint'}, 401

    if not __topic_exists(topic):
        return {'detail': 'Topic does not exist'}, 422

    # The topic name is loop-invariant; encode it once.
    encoded_topic = topic.encode('utf-8')

    failed = 0
    for event in events:
        # Prefer an explicit partitioning_key; fall back to ordering_key.
        # NOTE(review): an event with neither key raises KeyError here and
        # aborts the whole batch — presumably upstream validation guarantees
        # one of them is present; confirm against the request schema.
        if 'partitioning_key' in event:
            key = event['partitioning_key']
        else:
            key = event['ordering_key']

        try:
            retry_if_failed(__produce_kafka_message, encoded_topic, key.encode('utf-8'), json.dumps(event).encode('utf-8'))
        except Exception:
            # Best-effort per event: count the failure (reported as 503
            # below) but keep pushing the rest of the batch. The bare
            # ``except:`` this replaces swallowed even SystemExit and
            # logged nothing.
            logging.exception('[#KAFKA_PUSH_FAILED] Failed to push event to topic %s', topic)
            failed += 1

    ms_elapsed = monitoring.stop_time_measure(call_start)
    logging.info('[#KAFKA_PUSH_TIME] Time spent total for pushing to kafka: %s ms', ms_elapsed)

    metrics_writer.log_events(uid, topic, "-", len(events) - failed, 0)

    if failed > 0:
        return {'detail': 'Failed to write %s event(s) to kafka' % failed}, 503
    else:
        return {}, 201
    def process_batch(latest_offsets, current_batch):
        """Emit one stream message per partition for the current batch.

        For a partition with buffered events, yields a stream message
        carrying those events (timed via ``__measure_time``) and records
        the count with the metrics writer; a partition with nothing
        buffered gets a bare keep-alive message instead.
        """
        for part in partitions:
            tp = (topic.encode('UTF-8'), part)
            batch = current_batch[part]

            if batch:
                # Send everything read so far for this partition.
                msg = __create_stream_message(part, latest_offsets[tp], batch)
                with __measure_time(batch, msg):
                    yield msg
                metrics_writer.log_events(uid, topic, part, 0, len(batch))
            else:
                # Nothing buffered: emit a keep-alive only.
                yield __create_stream_message(part, latest_offsets[tp])
# Example #3
    def process_batch(latest_offsets, current_batch):
        """Emit one stream message per partition for the current batch.

        Closes over ``partitions``, ``topic`` and ``uid`` from the
        enclosing scope. ``latest_offsets`` is keyed by
        ``(encoded_topic, partition)`` tuples; ``current_batch`` maps a
        partition to its buffered events — presumably collected by the
        surrounding consumer loop (not visible here; confirm).
        """
        for partition in partitions:
            topic_partition = (topic.encode('UTF-8'), partition)
            # send the messages we could read so far
            if len(current_batch[partition]) > 0:
                stream_message = __create_stream_message(
                    partition, latest_offsets[topic_partition],
                    current_batch[partition])
                # Time the delivery of this message to the client.
                with __measure_time(current_batch[partition], stream_message):
                    yield stream_message
                # Record how many events were streamed for this partition.
                metrics_writer.log_events(uid, topic, partition, 0,
                                          len(current_batch[partition]))

            # just send the keep alive
            else:
                yield __create_stream_message(partition,
                                              latest_offsets[topic_partition])