Example #1
    def poll(self, timeout_ms):

        value = {
            u"metrics_group_interval": 60,
            u"resource": {
                u"resource_site": u"test_site",
                u"resource_id": u"test_id",
                u"resource_class": u"network",
                u"resource_plugin": u"test_plugin",
                u"resource_creation_timestamp": 1567823517.46,
                u"resource_subclass": u"test_subclass",
                u"resource_endpoint": u"test_endpoint",
                u"resource_metadata": {
                    u"test_metadata_key": u"test_metadata_value",
                    u"_resource_ttl": u"604800"
                },
                u"resource_type": u"test_type"
            },
            u"dimensions": [
                {
                    u"dimension_name": u"cpu_name",
                    u"dimension_value": u"test_cpu_name_value"
                },
                {
                    u"dimension_name": u"cpu_no",
                    u"dimension_value": u"test_cpu_no_value"
                },
                {
                    u"dimension_name": u"cpu_type",
                    u"dimension_value": u"test_cpu_type_value"
                }
            ],
            u"metrics_group_type": u"cpu",
            u"metrics": [
                {
                    u"metric_creation_timestamp": 1567823946.72,
                    u"metric_type": u"gauge",
                    u"metric_name": u"cpu_utilization",
                    u"metric_value": 0
                }
            ],
            u"metrics_group_creation_timestamp": 1567823946.72,
            u"metrics_group_schema_version": u"0.2"
        }

        return {
            u'400000005d73185508707bfc': [ConsumerRecord(
                topic=u'panoptes-metrics', partition=49, offset=704152, timestamp=-1, timestamp_type=0,
                key=b'class:subclass:type', value=json.dumps(value), checksum=-1526904207, serialized_key_size=19,
                serialized_value_size=1140)],
            u'400000005d731855164bb9bc': [ConsumerRecord(
                topic=u'panoptes-metrics', partition=49, offset=704152, timestamp=-1, timestamp_type=0,
                key=b'class:subclass:type::', value=json.dumps(value), checksum=-1526904207, serialized_key_size=19,
                serialized_value_size=1140)]
        }
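KafkaConsumer.poll() returns a dict mapping partitions to lists of ConsumerRecord; in the real API the keys are TopicPartition objects, while the hex-string keys above are specific to this test. A minimal sketch of draining such a poll() result (drain_poll_result is a hypothetical helper; json.loads undoes the json.dumps above):

    import json

    def drain_poll_result(poll_result):
        # Flatten {key: [ConsumerRecord, ...]} and decode each JSON value.
        for records in poll_result.values():
            for record in records:
                yield json.loads(record.value)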
Example #2
    def test_get_msgs(self):
        # arrange
        rec = Record("http://www.google.com", datetime.now(), 1.1, 200,
                     'itemtype', True)
        msgs = [
            ConsumerRecord(topic='website_health',
                           partition=0,
                           offset=172,
                           timestamp=1606615336458,
                           timestamp_type=0,
                           key=None,
                           value=bytes(rec.to_json(), encoding='utf8'),
                           headers=[],
                           checksum=None,
                           serialized_key_size=-1,
                           serialized_value_size=140,
                           serialized_header_size=-1),
        ]
        kafka_client.consumer = Mock(return_value=msgs)
        database.write_record = Mock()
        # act
        get_msgs()

        # assert
        database.write_record.assert_called_with(rec)
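The test pins down only the contract of get_msgs: read records via kafka_client.consumer and hand each decoded Record to database.write_record. A sketch that would satisfy it, assuming a Record.from_json inverse of to_json and value-based Record equality (both assumptions, not shown in the source):

    def get_msgs():
        # Hypothetical implementation: consume the available records
        # and persist each decoded Record.
        for msg in kafka_client.consumer():
            rec = Record.from_json(msg.value.decode('utf8'))
            database.write_record(rec)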
Example #3
    def mock_consumer(self, KafkaConsumer, value, max_calls=1):
        # Mock a consumer object
        fake_kafka_consumer = MagicMock()

        # When used as an iterator, the mock returns the record up to
        # max_calls times, then raises StopIteration.
        record = ConsumerRecord(topic=TOPIC_STATES,
                                partition=0,
                                offset=42,
                                timestamp=1467649216540,
                                timestamp_type=0,
                                key=b'NY',
                                value=value,
                                checksum=binascii.crc32(value),
                                # sizes are byte counts, not the raw bytes
                                serialized_key_size=len(b'NY'),
                                serialized_value_size=len(value))

        meta = {'i': 0}

        def _iter(*args, **kwargs):
            if meta['i'] >= max_calls:
                raise StopIteration()
            meta['i'] += 1
            return record

        fake_kafka_consumer.__next__.side_effect = _iter

        # Return some partitions
        fake_kafka_consumer.partitions_for_topic.return_value = set([0, 1])

        # Make class instantiation return our mock
        KafkaConsumer.return_value = fake_kafka_consumer

        return fake_kafka_consumer
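Typical usage would patch KafkaConsumer where the code under test imports it and let the helper install the iterator mock; the patch target below is illustrative, not from the source:

    @mock.patch('mymodule.KafkaConsumer')  # hypothetical import path
    def test_reads_one_state(self, KafkaConsumer):
        fake = self.mock_consumer(KafkaConsumer, value=b'{"state": "NY"}')
        # Code that instantiates KafkaConsumer() and calls next() on the
        # instance now receives the record once, then hits StopIteration.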
Example #4
    def send(self, message, submitted=None, headers=None):
        if submitted is None:
            submitted = datetime.datetime.utcnow()

        if headers is None:
            headers = {}

        if not is_in_test_mode(self.topic_config):
            timestamp_ms = datetime_to_kafka_ts(submitted)
            topic_retry_decorator(self._producer.send)(
                self.topic_config.name,
                value=message,
                timestamp_ms=timestamp_ms,
                headers=self.format_headers(headers),
            )
            topic_retry_decorator(self._producer.flush)()
        else:
            self.topic_config.messages.append((
                submitted,
                ConsumerRecord(
                    topic=self.topic_config.name,
                    partition=0,
                    offset=len(self.topic_config.messages),
                    timestamp=submitted,
                    timestamp_type=0,
                    key=None,
                    value=message,
                    headers=[(k, v) for k, v in headers.items()],
                    checksum=None,
                    serialized_key_size=None,
                    serialized_value_size=None,
                    serialized_header_size=None,
                ),
            ))
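In test mode the producer is bypassed and each message is captured on topic_config.messages as a (submitted, ConsumerRecord) pair, so a test can assert on what would have been sent. A hypothetical assertion against that capture (the sender wiring is assumed):

    sender.send(b'{"event": "ping"}')
    submitted, record = sender.topic_config.messages[-1]
    assert record.value == b'{"event": "ping"}'
    assert record.offset == len(sender.topic_config.messages) - 1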
Example #5
def test_topic_parse_correctly_records():
    headers = [
        ("header_1", "välûe%_1ù"),
        ("header_2", "välûe%_°2ù"),
        ("header_3", "välûe%_$*3ù"),
    ]

    records_by_partition = {
        TopicPartition(topic="topic", partition="partition"): [
            ConsumerRecord(
                topic="topic",
                partition="partition",
                offset=0,
                timestamp=1562566,
                timestamp_type=0,
                key=None,
                value=kirby_value_serializer("value"),
                headers=[(header[0], kirby_value_serializer(header[1]))
                         for header in headers],
                checksum=None,
                serialized_key_size=None,
                serialized_value_size=None,
                serialized_header_size=None,
            )
        ]
    }

    parsed_records = parse_records(records_by_partition, raw_records=True)
    assert parsed_records[0].headers == {
        header[0]: header[1]
        for header in headers
    }
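kirby_value_serializer is not shown here; for the final assertion to hold, parse_records must apply the matching deserializer to each header value. A JSON-over-UTF-8 pair like the following would be consistent with this test (an assumption, not kirby's actual code):

    import json

    def kirby_value_serializer(value):
        return json.dumps(value).encode('utf-8')

    def kirby_value_deserializer(raw):
        return json.loads(raw.decode('utf-8'))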
Example #6
def test_partition_records_no_fetch_offset():
    """Test that no records are returned when the fetch offset
    is past the end of the fetched batch
    """
    batch_start = 0
    batch_end = 100
    fetch_offset = 123
    tp = TopicPartition('foo', 0)
    messages = [ConsumerRecord(tp.topic, tp.partition, i,
                               None, None, 'key', 'value', None, 'checksum', 0, 0, -1)
                for i in range(batch_start, batch_end)]
    records = Fetcher.PartitionRecords(fetch_offset, None, messages)
    assert len(records) == 0
Example #7
    def _get_record_metadata(self):
        return ConsumerRecord(topic=TOPIC_STATES,
                              partition=0,
                              offset=42,
                              timestamp=1467649216540,
                              timestamp_type=0,
                              key=b'NY',
                              value=b'foo',
                              checksum=binascii.crc32(b'foo'),
                              # sizes are byte counts, not the raw bytes
                              serialized_key_size=len(b'NY'),
                              serialized_value_size=len(b'foo'))
Example #8
def test_partition_records_compacted_offset():
    """Test that messagesets are handle correctly
    when the fetch offset points to a message that has been compacted
    """
    batch_start = 0
    batch_end = 100
    fetch_offset = 42
    tp = TopicPartition('foo', 0)
    messages = [ConsumerRecord(tp.topic, tp.partition, i,
                               None, None, 'key', 'value', None, 'checksum', 0, 0, -1)
                for i in range(batch_start, batch_end) if i != fetch_offset]
    records = Fetcher.PartitionRecords(fetch_offset, None, messages)
    assert len(records) == batch_end - fetch_offset - 1
    msgs = records.take(1)
    assert msgs[0].offset == fetch_offset + 1
Example #9
    def _get_record_metadata(self):
        return ConsumerRecord(
            topic=TOPIC_STATES,
            partition=0,
            offset=42,
            timestamp=1467649216540,
            timestamp_type=0,
            key=b"NY",
            value=b"foo",
            headers=None,
            checksum=binascii.crc32(b"foo"),
            # sizes are byte counts, not the raw bytes
            serialized_key_size=len(b"NY"),
            serialized_value_size=len(b"foo"),
            serialized_header_size=0,
        )
Example #10
    def test_process_kafka_message(self):
        cr = ConsumerRecord(
            topic="test_topic",
            partition=0,
            offset=0,
            timestamp=1583312930269,
            timestamp_type=0,
            headers=[],
            checksum=None,
            serialized_key_size=-1,
            serialized_value_size=2114,
            serialized_header_size=-1,
            key=None,
            value=b'{"k1": "v1","k2": "v2","uid": "uid_value","ts": 1468244385}'
        )

        uid, dt = consumer.process_kafka_message(cr)
        assert uid == "uid_value"
        assert dt == datetime.fromtimestamp(1468244385)
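An implementation consistent with this test would JSON-decode the record value and lift out the uid and ts fields; a sketch, not necessarily the project's actual code:

    import json
    from datetime import datetime

    def process_kafka_message(record):
        payload = json.loads(record.value)  # accepts bytes on Python 3.6+
        return payload['uid'], datetime.fromtimestamp(payload['ts'])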
Example #11
def test_partition_records_offset():
    """Test that compressed messagesets are handle correctly
    when fetch offset is in the middle of the message list
    """
    batch_start = 120
    batch_end = 130
    fetch_offset = 123
    tp = TopicPartition('foo', 0)
    messages = [ConsumerRecord(tp.topic, tp.partition, i,
                               None, None, 'key', 'value', [], 'checksum', 0, 0, -1)
                for i in range(batch_start, batch_end)]
    records = Fetcher.PartitionRecords(fetch_offset, None, messages)
    assert len(records) > 0
    msgs = records.take(1)
    assert msgs[0].offset == fetch_offset
    assert records.fetch_offset == fetch_offset + 1
    msgs = records.take(2)
    assert len(msgs) == 2
    assert len(records) > 0
    records.discard()
    assert len(records) == 0
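Examples #6, #8, and #11 together pin down the skip-ahead behaviour of Fetcher.PartitionRecords: records before the fetch offset are dropped, a compacted-away offset resumes at the next surviving message, and discard() empties the buffer. A simplified model that passes all three tests (not kafka-python's actual implementation):

    class SimplePartitionRecords:
        def __init__(self, fetch_offset, messages):
            # Keep only messages at or past the fetch offset; compaction may
            # have removed the exact offset, so strictly-lower ones are skipped.
            self.messages = [m for m in messages if m.offset >= fetch_offset]
            self.fetch_offset = (self.messages[0].offset
                                 if self.messages else fetch_offset)

        def __len__(self):
            return len(self.messages)

        def take(self, n):
            taken, self.messages = self.messages[:n], self.messages[n:]
            if self.messages:
                self.fetch_offset = self.messages[0].offset
            return taken

        def discard(self):
            self.messages = []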