Example #1
def test_Quotas(metrics):
    sensor = metrics.sensor('test')
    sensor.add(metrics.metric_name('test1.total', 'grp1'), Total(),
               MetricConfig(quota=Quota.upper_bound(5.0)))
    sensor.add(metrics.metric_name('test2.total', 'grp1'), Total(),
               MetricConfig(quota=Quota.lower_bound(0.0)))
    sensor.record(5.0)
    with pytest.raises(QuotaViolationError):
        sensor.record(1.0)

    assert abs(6.0 - metrics.metrics.get(metrics.metric_name('test1.total', 'grp1')).value()) \
            < EPS

    sensor.record(-6.0)
    with pytest.raises(QuotaViolationError):
        sensor.record(-1.0)
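The 6.0 assertion shows that Sensor.record() folds the value into every attached stat before the quota check fires, so the Total still advances even when the call raises. A minimal standalone sketch of guarding a record() call, assuming the kafka.errors and kafka.metrics import paths these tests use:

from kafka.errors import QuotaViolationError  # assumed import path
from kafka.metrics import MetricConfig, Metrics, Quota
from kafka.metrics.stats import Total

metrics = Metrics()
sensor = metrics.sensor('guarded')
sensor.add(metrics.metric_name('guarded.total', 'grp1'), Total(),
           MetricConfig(quota=Quota.upper_bound(5.0)))

try:
    sensor.record(10.0)  # exceeds the 5.0 upper bound
except QuotaViolationError:
    # The 10.0 has already been folded into the Total; the exception
    # only signals that the bound was crossed, so back off here.
    pass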
Example #2
def test_Percentiles(metrics):
    buckets = 100
    _percentiles = [
        Percentile(metrics.metric_name('test.p25', 'grp1'), 25),
        Percentile(metrics.metric_name('test.p50', 'grp1'), 50),
        Percentile(metrics.metric_name('test.p75', 'grp1'), 75),
    ]
    percs = Percentiles(4 * buckets, BucketSizing.CONSTANT, 100.0, 0.0,
                        percentiles=_percentiles)
    config = MetricConfig(event_window=50, samples=2)
    sensor = metrics.sensor('test', config)
    sensor.add_compound(percs)
    p25 = metrics.metrics.get(metrics.metric_name('test.p25', 'grp1'))
    p50 = metrics.metrics.get(metrics.metric_name('test.p50', 'grp1'))
    p75 = metrics.metrics.get(metrics.metric_name('test.p75', 'grp1'))

    # record two windows worth of sequential values
    for i in range(buckets):
        sensor.record(i)

    assert abs(p25.value() - 25) < 1.0
    assert abs(p50.value() - 50) < 1.0
    assert abs(p75.value() - 75) < 1.0

    for i in range(buckets):
        sensor.record(0.0)

    assert p25.value() < 1.0
    assert p50.value() < 1.0
    assert p75.value() < 1.0
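Percentiles is given a 4 * buckets byte buffer with constant-width buckets spanning 0.0 to 100.0. One pass over the values 0..99 fills the histogram uniformly, so each percentile lands within 1.0 of its nominal value; the second pass of zeros then pulls every percentile down below 1.0.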
Example #3
    def __init__(self, **configs):
        log.debug("Starting KafkaAdminClient with configuration: %s", configs)
        extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
        if extra_configs:
            raise KafkaConfigurationError(
                "Unrecognized configs: {}".format(extra_configs))

        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)

        # Configure metrics
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(
            samples=self.config['metrics_num_samples'],
            time_window_ms=self.config['metrics_sample_window_ms'],
            tags=metrics_tags)
        reporters = [
            reporter() for reporter in self.config['metric_reporters']
        ]
        self._metrics = Metrics(metric_config, reporters)

        self._client = KafkaClient(metrics=self._metrics,
                                   metric_group_prefix='admin',
                                   **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']

        self._closed = False
        self._refresh_controller_id()
        log.debug("KafkaAdminClient started.")
Example #4
def test_rate_windowing(mocker, time_keeper, metrics):
    mocker.patch('time.time', side_effect=time_keeper.time)

    # Use the default time window. Set 3 samples
    config = MetricConfig(samples=3)
    sensor = metrics.sensor('test.sensor', config)
    sensor.add(metrics.metric_name('test.rate', 'grp1'),
               Rate(TimeUnit.SECONDS))

    sum_val = 0
    count = config.samples - 1
    # Advance 1 window after every record
    for i in range(count):
        sensor.record(100)
        sum_val += 100
        time_keeper.sleep(config.time_window_ms / 1000.0)

    # Sleep for half the window.
    time_keeper.sleep(config.time_window_ms / 2.0 / 1000.0)

    # prior to any time passing
    elapsed_secs = (config.time_window_ms * (config.samples - 1) +
                    config.time_window_ms / 2.0) / 1000.0

    kafka_metric = metrics.metrics.get(metrics.metric_name(
        'test.rate', 'grp1'))
    assert abs((sum_val / elapsed_secs) - kafka_metric.value()) < EPS, \
            'Rate(0...2) = 2.666'
    assert abs(elapsed_secs - (kafka_metric.measurable.window_size(config, time.time() * 1000) / 1000.0)) \
            < EPS, 'Elapsed Time = 75 seconds'
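The assert messages follow from MetricConfig's default 30-second sample window: two records of 100 plus a half-window sleep give elapsed_secs = (2 * 30000 + 15000) / 1000.0 = 75.0, so the rate is 200 / 75 ≈ 2.666 per second.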
Example #5
def test_event_windowing(mocker, time_keeper):
    mocker.patch('time.time', side_effect=time_keeper.time)

    count = Count()
    config = MetricConfig(event_window=1, samples=2)
    count.record(config, 1.0, time_keeper.ms())
    count.record(config, 1.0, time_keeper.ms())
    assert 2.0 == count.measure(config, time_keeper.ms())
    count.record(config, 1.0, time_keeper.ms())  # first event times out
    assert 2.0 == count.measure(config, time_keeper.ms())
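With event_window=1 and samples=2, each sample holds exactly one event and only the two newest samples are retained, so the third record() evicts the first and measure() still returns 2.0.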
Example #6
    def __init__(self, *topics, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs.pop(key)

        # Only check for extra config keys in top-level class
        assert not configs, 'Unrecognized configs: %s' % configs

        deprecated = {'smallest': 'earliest', 'largest': 'latest'}
        if self.config['auto_offset_reset'] in deprecated:
            new_config = deprecated[self.config['auto_offset_reset']]
            log.warning('use auto_offset_reset=%s (%s is deprecated)',
                        new_config, self.config['auto_offset_reset'])
            self.config['auto_offset_reset'] = new_config

        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                     time_window_ms=self.config['metrics_sample_window_ms'],
                                     tags=metrics_tags)
        reporters = [reporter() for reporter in self.config['metric_reporters']]
        self._metrics = Metrics(metric_config, reporters)
        # TODO _metrics likely needs to be passed to KafkaClient, etc.

        # api_version was previously a str. Accept old format for now
        if isinstance(self.config['api_version'], str):
            str_version = self.config['api_version']
            if str_version == 'auto':
                self.config['api_version'] = None
            else:
                self.config['api_version'] = tuple(map(int, str_version.split('.')))
            log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
                        str(self.config['api_version']), str_version)

        self._client = KafkaClient(metrics=self._metrics, **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']

        self._subscription = SubscriptionState(self.config['auto_offset_reset'])
        self._fetcher = Fetcher(
            self._client, self._subscription, self._metrics, **self.config)
        self._coordinator = ConsumerCoordinator(
            self._client, self._subscription, self._metrics,
            assignors=self.config['partition_assignment_strategy'],
            **self.config)
        self._closed = False
        self._iterator = None
        self._consumer_timeout = float('inf')

        if topics:
            self._subscription.subscribe(topics=topics)
            self._client.set_topics(topics)
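The api_version shim above accepts the legacy string form: 'auto' becomes None, which defers to the broker version auto-discovered by KafkaClient, while a dotted version string is split into a tuple of ints for easy comparison.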
Example #7
def test_time_windowing(mocker, time_keeper):
    mocker.patch('time.time', side_effect=time_keeper.time)

    count = Count()
    config = MetricConfig(time_window_ms=1, samples=2)
    count.record(config, 1.0, time_keeper.ms())
    time_keeper.sleep(.001)
    count.record(config, 1.0, time_keeper.ms())
    assert 2.0 == count.measure(config, time_keeper.ms())
    time_keeper.sleep(.001)
    count.record(config, 1.0, time_keeper.ms())  # oldest event times out
    assert 2.0 == count.measure(config, time_keeper.ms())
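This is the same eviction logic on the time axis: time_window_ms=1 with samples=2 retains two one-millisecond samples, so after each 1 ms sleep the oldest sample has aged out by the next record() and the count holds at 2.0.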
Example #8
    def __init__(self, config):
        Thread.__init__(self)
        self.config = config
        self.timeout_ms = 10000
        self.kafka_client = None
        self.running = True
        self.sc = None
        metrics_tags = {"client-id": self.config["client_id"]}
        metric_config = MetricConfig(samples=2, time_window_ms=30000, tags=metrics_tags)
        self._metrics = Metrics(metric_config, reporters=[])
        self.lock = Lock()
        self.lock.acquire()
        self.log = logging.getLogger("MasterCoordinator")
Example #9
    def __init__(self, *topics, **configs):
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs.pop(key)

        # Only check for extra config keys in top-level class
        assert not configs, 'Unrecognized configs: %s' % configs

        deprecated = {'smallest': 'earliest', 'largest': 'latest'}
        if self.config['auto_offset_reset'] in deprecated:
            new_config = deprecated[self.config['auto_offset_reset']]
            log.warning('use auto_offset_reset=%s (%s is deprecated)',
                        new_config, self.config['auto_offset_reset'])
            self.config['auto_offset_reset'] = new_config

        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                     time_window_ms=self.config['metrics_sample_window_ms'],
                                     tags=metrics_tags)
        reporters = [reporter() for reporter in self.config['metric_reporters']]
        reporters.append(DictReporter('kafka.consumer'))
        self._metrics = Metrics(metric_config, reporters)
        metric_group_prefix = 'consumer'
        # TODO _metrics likely needs to be passed to KafkaClient, etc.

        self._client = KafkaClient(**self.config)

        # Check Broker Version if not set explicitly
        if self.config['api_version'] == 'auto':
            self.config['api_version'] = self._client.check_version()
        assert self.config['api_version'] in ('0.9', '0.8.2', '0.8.1', '0.8.0'), 'Unrecognized api version'

        # Convert api_version config to tuple for easy comparisons
        self.config['api_version'] = tuple(
            map(int, self.config['api_version'].split('.')))

        self._subscription = SubscriptionState(self.config['auto_offset_reset'])
        self._fetcher = Fetcher(
            self._client, self._subscription, self._metrics, metric_group_prefix, **self.config)
        self._coordinator = ConsumerCoordinator(
            self._client, self._subscription, self._metrics, metric_group_prefix,
            assignors=self.config['partition_assignment_strategy'],
            **self.config)
        self._closed = False
        self._iterator = None
        self._consumer_timeout = float('inf')

        if topics:
            self._subscription.subscribe(topics=topics)
            self._client.set_topics(topics)
Example #10
    def __init__(self, **configs):
        log.debug("Starting Kafka administration interface")
        extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
        if extra_configs:
            raise KafkaConfigurationError("Unrecognized configs: %s" %
                                          extra_configs)

        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)

        # api_version was previously a str. accept old format for now
        if isinstance(self.config['api_version'], str):
            deprecated = self.config['api_version']
            if deprecated == 'auto':
                self.config['api_version'] = None
            else:
                self.config['api_version'] = tuple(
                    map(int, deprecated.split('.')))
            log.warning(
                'use api_version=%s [tuple] -- "%s" as str is deprecated',
                str(self.config['api_version']), deprecated)

        # Configure metrics
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(
            samples=self.config['metrics_num_samples'],
            time_window_ms=self.config['metrics_sample_window_ms'],
            tags=metrics_tags)
        reporters = [
            reporter() for reporter in self.config['metric_reporters']
        ]
        self._metrics = Metrics(metric_config, reporters)

        self._client = KafkaClient(metrics=self._metrics,
                                   metric_group_prefix='admin',
                                   **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']

        self._closed = False
        self._refresh_controller_id()
        log.debug('Kafka administration interface started')
Example #11
    def __init__(self,
                 default_config=None,
                 reporters=None,
                 enable_expiration=False):
        """
        Create a metrics repository with a default config, given metric
        reporters and the ability to expire eligible sensors

        Arguments:
            default_config (MetricConfig, optional): The default config
            reporters (list of AbstractMetricsReporter, optional):
                The metrics reporters
            enable_expiration (bool, optional): true if the metrics instance
                can garbage collect inactive sensors, false otherwise
        """
        self._lock = threading.RLock()
        self._config = default_config or MetricConfig()
        self._sensors = {}
        self._metrics = {}
        self._children_sensors = {}
        self._reporters = reporters or []
        for reporter in self._reporters:
            reporter.init([])

        if enable_expiration:

            def expire_loop():
                while True:
                    # delay 30 seconds
                    time.sleep(30)
                    self.ExpireSensorTask.run(self)

            metrics_scheduler = threading.Thread(target=expire_loop)
            # Creating a daemon thread to not block shutdown
            metrics_scheduler.daemon = True
            metrics_scheduler.start()

        self.add_metric(
            self.metric_name("count", "kafka-metrics-count",
                             "total number of registered metrics"),
            AnonMeasurable(lambda config, now: len(self._metrics)),
        )
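With enable_expiration set, a daemon thread wakes every 30 seconds and runs ExpireSensorTask to garbage-collect inactive sensors; the daemon flag keeps it from blocking interpreter shutdown. The constructor also registers a bootstrap 'count' metric whose AnonMeasurable reports the number of currently registered metrics.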
Example #12
def test_old_data_has_no_effect(mocker, time_keeper):
    mocker.patch('time.time', side_effect=time_keeper.time)

    max_stat = Max()
    min_stat = Min()
    avg_stat = Avg()
    count_stat = Count()
    window_ms = 100
    samples = 2
    config = MetricConfig(time_window_ms=window_ms, samples=samples)
    max_stat.record(config, 50, time_keeper.ms())
    min_stat.record(config, 50, time_keeper.ms())
    avg_stat.record(config, 50, time_keeper.ms())
    count_stat.record(config, 50, time_keeper.ms())

    time_keeper.sleep(samples * window_ms / 1000.0)
    assert float('-inf') == max_stat.measure(config, time_keeper.ms())
    assert float(sys.maxsize) == min_stat.measure(config, time_keeper.ms())
    assert 0.0 == avg_stat.measure(config, time_keeper.ms())
    assert 0 == count_stat.measure(config, time_keeper.ms())
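Sleeping for samples * window_ms (here 200 ms) expires every sample, so each stat reverts to its initial accumulator value: Max reports -inf, Min reports sys.maxsize, and Avg and Count report zero.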
Example #13
    def __init__(self, **configs):
        log.debug("Starting the Kafka producer")  # trace
        self.config = copy.copy(self.DEFAULT_CONFIG)
        for key in self.config:
            if key in configs:
                self.config[key] = configs.pop(key)

        # Only check for extra config keys in top-level class
        assert not configs, 'Unrecognized configs: %s' % (configs,)

        if self.config['client_id'] is None:
            self.config['client_id'] = 'kafka-python-producer-%s' % \
                                       (PRODUCER_CLIENT_ID_SEQUENCE.increment(),)

        if self.config['acks'] == 'all':
            self.config['acks'] = -1

        # api_version was previously a str. accept old format for now
        if isinstance(self.config['api_version'], str):
            deprecated = self.config['api_version']
            if deprecated == 'auto':
                self.config['api_version'] = None
            else:
                self.config['api_version'] = tuple(map(int, deprecated.split('.')))
            log.warning('use api_version=%s [tuple] -- "%s" as str is deprecated',
                        str(self.config['api_version']), deprecated)

        # Configure metrics
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                     time_window_ms=self.config['metrics_sample_window_ms'],
                                     tags=metrics_tags)
        reporters = [reporter() for reporter in self.config['metric_reporters']]
        self._metrics = Metrics(metric_config, reporters)

        client = KafkaClient(metrics=self._metrics, metric_group_prefix='producer',
                             wakeup_timeout_ms=self.config['max_block_ms'],
                             **self.config)

        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = client.config['api_version']

        if self.config['compression_type'] == 'lz4':
            assert self.config['api_version'] >= (0, 8, 2), 'LZ4 Requires >= Kafka 0.8.2 Brokers'

        # Check compression_type for library support
        ct = self.config['compression_type']
        if ct not in self._COMPRESSORS:
            raise ValueError("Not supported codec: {}".format(ct))
        else:
            checker, compression_attrs = self._COMPRESSORS[ct]
            assert checker(), "Libraries for {} compression codec not found".format(ct)
            self.config['compression_attrs'] = compression_attrs

        message_version = self._max_usable_produce_magic()
        self._accumulator = RecordAccumulator(message_version=message_version, metrics=self._metrics, **self.config)
        self._metadata = client.cluster
        guarantee_message_order = bool(self.config['max_in_flight_requests_per_connection'] == 1)
        self._sender = Sender(client, self._metadata,
                              self._accumulator, self._metrics,
                              guarantee_message_order=guarantee_message_order,
                              **self.config)
        self._sender.daemon = True
        self._sender.start()
        self._closed = False

        self._cleanup = self._cleanup_factory()
        atexit.register(self._cleanup)
        log.debug("Kafka producer started")
Example #14
@pytest.fixture
def config():
    # A default MetricConfig shared by the tests above.
    return MetricConfig()
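Pulling the pieces together, here is a minimal standalone sketch of the pattern the examples above share: build a MetricConfig, hand it to Metrics, attach stats to a sensor, and read values back through the registry. The import paths and the 'demo' names are assumptions based on these examples, not taken from any one of them.

from kafka.metrics import MetricConfig, Metrics
from kafka.metrics.stats import Avg, Max

# Two 30-second samples, i.e. roughly one minute of retained history.
config = MetricConfig(samples=2, time_window_ms=30000,
                      tags={'client-id': 'demo-client'})
metrics = Metrics(default_config=config)

sensor = metrics.sensor('request-latency')
sensor.add(metrics.metric_name('latency.avg', 'demo-group'), Avg())
sensor.add(metrics.metric_name('latency.max', 'demo-group'), Max())

for latency_ms in (10.0, 25.0, 40.0):
    sensor.record(latency_ms)

avg = metrics.metrics.get(metrics.metric_name('latency.avg', 'demo-group'))
print(avg.value())  # 25.0 while all three records are inside the window
metrics.close()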