Esempio n. 1
0
    def test_process_change(self):
        """Feed one couch change through the pillow and verify the resulting Kafka message.

        Publishes a single CommCareCase change, reads it back from the CASE
        topic, checks every field of the decoded change metadata, and then
        confirms that no further message was produced.
        """
        consumer = KafkaConsumer(
            topics.CASE,
            group_id='test-consumer',
            bootstrap_servers=[settings.KAFKA_URL],
            consumer_timeout_ms=100,
        )
        pillow = ChangeFeedPillow(self._fake_couch, kafka=get_kafka_client(), checkpoint=None)
        doc = {
            'doc_type': 'CommCareCase',
            'type': 'mother',
            'domain': 'kafka-test-domain',
        }
        pillow.process_change(Change(id='test-id', sequence_id='3', document=doc))

        # exactly one message should have been published for the change above
        meta = change_meta_from_kafka_message(consumer.next().value)
        self.assertEqual(COUCH, meta.data_source_type)
        self.assertEqual(self._fake_couch.dbname, meta.data_source_name)
        self.assertEqual('test-id', meta.document_id)
        self.assertEqual(doc['doc_type'], meta.document_type)
        self.assertEqual(doc['type'], meta.document_subtype)
        self.assertEqual(doc['domain'], meta.domain)
        self.assertEqual(False, meta.is_deletion)

        # consuming again must time out: nothing else was published
        with self.assertRaises(ConsumerTimeout):
            consumer.next()
Esempio n. 2
0
def _get_topic_offsets(topics, latest):
    """
    :param topics: list of topics
    :param latest: True to fetch latest offsets, False to fetch earliest available
    :return: dict: { (topic, partition): offset, ... }
    """

    # https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetRequest
    # https://cfchou.github.io/blog/2015/04/23/a-closer-look-at-kafka-offsetrequest/
    assert set(topics) <= set(ALL)
    client = get_kafka_client()

    # Kafka's offset API: -1 asks for the latest offset, -2 for the earliest;
    # we only ever want a single offset back per partition.
    max_offsets = 1
    offset_time = -1 if latest else -2

    results = {}
    requests = []
    for topic in topics:
        # topic_partitions maps topic -> {partition: metadata}; iterate the keys
        for partition in client.topic_partitions.get(topic, {}):
            key = (kafka_bytestring(topic), partition)
            # pre-seed with None so partitions missing from the response are visible
            results[key] = None
            requests.append(
                OffsetRequest(kafka_bytestring(topic), partition, offset_time, max_offsets)
            )

    for response in client.send_offset_request(requests):
        results[(kafka_bytestring(response.topic), response.partition)] = response.offsets[0]

    return results
Esempio n. 3
0
def create_kafka_topics():
    """Ensure every topic in ``topics.ALL`` exists, printing the outcome per topic."""
    client = get_kafka_client()
    for name in topics.ALL:
        if not client.has_metadata_for_topic(name):
            # missing: block (up to 10s) until the broker has created it
            client.ensure_topic_exists(name, timeout=10)
            state = "created"
        else:
            state = "already exists"
        print("topic {}: {}".format(state, name))
Esempio n. 4
0
def create_kafka_topics():
    """Create any Kafka topics from ``topics.ALL`` that do not exist yet.

    Prints one status line per topic ("created" or "already exists").
    """
    client = get_kafka_client()
    for topic_name in topics.ALL:
        exists = client.has_metadata_for_topic(topic_name)
        if exists:
            outcome = "already exists"
        else:
            # wait up to 10 seconds for the broker to materialize the topic
            client.ensure_topic_exists(topic_name, timeout=10)
            outcome = "created"
        print("topic {}: {}".format(outcome, topic_name))
Esempio n. 5
0
def _get_topic_offsets(topics, latest):
    """Return ``{topic: offset}`` for partition 0 of each requested topic.

    ``latest=True`` fetches the newest offset, ``latest=False`` the earliest
    available one, per the Kafka offset API (-1 / -2 sentinel times).
    """
    # https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetRequest
    assert set(topics) <= set(ALL)
    offset_time = -1 if latest else -2
    client = get_kafka_client()

    requests = []
    for topic in topics:
        # partition 0 only; ask for a single offset per topic
        requests.append(OffsetRequest(topic, 0, offset_time, 1))

    result = {}
    for response in client.send_offset_request(requests):
        result[response.topic] = response.offsets[0]
    return result
Esempio n. 6
0
def _get_topic_offsets(topics, latest):
    """Map each topic to the offset of partition 0 (latest or earliest).

    See the Kafka protocol guide for the offset-request semantics:
    https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetRequest
    """
    assert set(topics) <= set(ALL)
    # -1 = latest offset, -2 = earliest available offset
    when = -1 if latest else -2
    client = get_kafka_client()
    responses = client.send_offset_request(
        [OffsetRequest(t, 0, when, 1) for t in topics]
    )
    offsets = {}
    for r in responses:
        offsets[r.topic] = r.offsets[0]
    return offsets
Esempio n. 7
0
 def setUp(cls):
     # NOTE(review): named setUp but takes `cls` and assigns what look like
     # class-level attributes — presumably this was setUpClass/@classmethod
     # in the original; the decorator is not visible here. TODO confirm.
     # Fake couch db standing in for the real data source in these tests.
     cls._fake_couch = FakeCouchDb()
     cls._fake_couch.dbname = 'test-couchdb'
     # Skip (rather than fail) the test setup if Kafka is unreachable.
     with trap_extra_setup(KafkaUnavailableError):
         cls.consumer = KafkaConsumer(
             topics.CASE,
             group_id='test-consumer',
             bootstrap_servers=[settings.KAFKA_URL],
             # short timeout so consumer.next() fails fast when no message arrives
             consumer_timeout_ms=100,
         )
     # Pillow under test, wired to the fake couch and a real Kafka client.
     cls.pillow = ChangeFeedPillow(cls._fake_couch, kafka=get_kafka_client(), checkpoint=None)
Esempio n. 8
0
def check_kafka():
    """Health-check the Kafka connection, returning a ServiceStatus."""
    try:
        client = get_kafka_client()
    except Exception as e:
        return ServiceStatus(False, "Could not connect to Kafka: %s" % e)

    # connected: make sure the cluster actually has brokers and topics
    if not client.cluster.brokers():
        return ServiceStatus(False, "No Kafka brokers found")
    if not client.cluster.topics():
        return ServiceStatus(False, "No Kafka topics found")
    return ServiceStatus(True, "Kafka seems to be in order")
Esempio n. 9
0
def check_kafka():
    """Return a ServiceStatus describing whether Kafka looks healthy.

    Fails if the client cannot be created, or if the cluster reports
    zero brokers or zero topics.
    """
    try:
        client = get_kafka_client()
    except Exception as e:
        return ServiceStatus(False, "Could not connect to Kafka: %s" % e)

    broker_count = len(client.cluster.brokers())
    topic_count = len(client.cluster.topics())
    if broker_count == 0:
        return ServiceStatus(False, "No Kafka brokers found")
    elif topic_count == 0:
        return ServiceStatus(False, "No Kafka topics found")
    else:
        return ServiceStatus(True, "Kafka seems to be in order")