# Code example #1
def test_measurements_runner(subcgroups):
    """Run a single MeasurementRunner iteration against a mocked node and
    verify the metrics handed to the storage backend."""
    # Two redis tasks served by a mocked Mesos node.
    task_a = redis_task_with_default_labels('t1', subcgroups)
    task_b = redis_task_with_default_labels('t2', subcgroups)

    node_mock = Mock(spec=MesosNode,
                     get_tasks=Mock(return_value=[task_a, task_b]))
    storage_mock = Mock(spec=storage.Storage, store=Mock())

    runner = MeasurementRunner(
        node=node_mock,
        metrics_storage=storage_mock,
        rdt_enabled=False,
        # Extra label that must be propagated onto internal wca metrics.
        extra_labels=dict(extra_label='extra_value'),
    )
    # Stub out waiting so a single iteration can be driven by hand.
    runner._wait = Mock()
    runner._initialize()
    runner._iterate()

    # Capture the metrics that were passed to store().
    got_metrics = runner._metrics_storage.store.call_args[0][0]

    # Internal wca metrics: liveness flag, number of controlled tasks,
    # and the agent's own memory usage.
    assert_metric(got_metrics, 'wca_up', dict(extra_label='extra_value'))
    assert_metric(got_metrics, 'wca_tasks', expected_metric_value=2)
    # wca & its children memory usage (in bytes).
    assert_metric(got_metrics, 'wca_memory_usage_bytes',
                  expected_metric_value=WCA_MEMORY_USAGE * 2 * 1024)

    # Per-task measurements, based on the get_measurements mocks; with
    # subcgroups the per-cgroup values are summed.
    expected_cpu = TASK_CPU_USAGE * (len(subcgroups) if subcgroups else 1)
    for task in (task_a, task_b):
        assert_metric(got_metrics, 'cpu_usage',
                      dict(task_id=task.task_id),
                      expected_metric_value=expected_cpu)

    # application / application_version_name labels come from the default
    # runner._task_label_generators set up in MeasurementRunner's constructor.
    assert_metric(got_metrics, 'cpu_usage', {
        'application': task_a.name,
        'application_version_name': ''
    })

    # The `initial_task_cpu_assignment` label must be attached to task metrics.
    assert_metric(got_metrics, 'cpu_usage',
                  {'initial_task_cpu_assignment': '8.0'})
# Code example #2
def test_measurements_runner(subcgroups):
    """Drive one MeasurementRunner iteration on a mocked node and check the
    metrics delivered to the storage backend (MetricName-based variant)."""
    # Two redis tasks returned by the mocked Mesos node.
    first = redis_task_with_default_labels('t1', subcgroups)
    second = redis_task_with_default_labels('t2', subcgroups)

    runner = MeasurementRunner(
        node=Mock(spec=MesosNode,
                  get_tasks=Mock(return_value=[first, second])),
        metrics_storage=Mock(spec=storage.Storage, store=Mock()),
        rdt_enabled=False,
        gather_hw_mm_topology=False,
        # Extra label expected to show up on internal wca metrics.
        extra_labels=dict(extra_label='extra_value'),
    )
    # Stub out waiting so exactly one iteration can be run manually.
    runner._wait = Mock()
    runner._initialize()
    runner._iterate()

    # Capture the metrics that were passed to store().
    got_metrics = runner._metrics_storage.store.call_args[0][0]

    # Internal wca metrics: liveness flag, number of controlled tasks,
    # and the agent's own memory usage.
    assert_metric(got_metrics, MetricName.WCA_UP,
                  dict(extra_label='extra_value'))
    assert_metric(got_metrics, MetricName.WCA_TASKS, expected_metric_value=2)
    # wca & its children memory usage (in bytes).
    assert_metric(got_metrics, MetricName.WCA_MEM_USAGE_BYTES,
                  expected_metric_value=WCA_MEMORY_USAGE * 2 * 1024)

    # Per-task CPU measurements, based on the get_measurements mocks; with
    # subcgroups the per-cgroup values are summed.
    expected_cpu = TASK_CPU_USAGE * (len(subcgroups) if subcgroups else 1)
    for task in (first, second):
        assert_metric(got_metrics, MetricName.TASK_CPU_USAGE_SECONDS,
                      dict(task_id=task.task_id),
                      expected_metric_value=expected_cpu)