# Imports assumed for these examples (module paths follow the wca repository
# layout; the test helpers and constants are assumed to come from the
# project's own test utilities).
from unittest.mock import Mock, patch

from wca import storage
from wca.mesos import MesosNode
from wca.metrics import MetricName
from wca.runners.measurement import MeasurementRunner
from tests.testing import (assert_metric, redis_task_with_default_labels,
                           TASK_CPU_USAGE, WCA_MEMORY_USAGE)


def test_measurements_runner(subcgroups):
    # Mocked node returning two Redis tasks ('subcgroups' is supplied by
    # pytest parametrization in the original test module).
    t1 = redis_task_with_default_labels('t1', subcgroups)
    t2 = redis_task_with_default_labels('t2', subcgroups)

    runner = MeasurementRunner(
        node=Mock(spec=MesosNode, get_tasks=Mock(return_value=[t1, t2])),
        metrics_storage=Mock(spec=storage.Storage, store=Mock()),
        rdt_enabled=False,
        extra_labels=dict(
            extra_label='extra_value')  # extra label with some extra value
    )
    # Mock _wait so a single iteration can be driven by hand below.
    runner._wait = Mock()
    runner._initialize()
    runner._iterate()

    # Check output metrics.
    got_metrics = runner._metrics_storage.store.call_args[0][0]

    # Internal wca metrics are generated (wca is running, number of tasks
    # under control, memory usage and profiling information).
    assert_metric(got_metrics, 'wca_up', dict(extra_label='extra_value'))
    assert_metric(got_metrics, 'wca_tasks', expected_metric_value=2)
    # wca & its children memory usage (in bytes)
    assert_metric(got_metrics,
                  'wca_memory_usage_bytes',
                  expected_metric_value=WCA_MEMORY_USAGE * 2 * 1024)

    # Task measurement metrics, based on the get_measurements mocks.
    cpu_usage = TASK_CPU_USAGE * (len(subcgroups) if subcgroups else 1)
    assert_metric(got_metrics,
                  'cpu_usage',
                  dict(task_id=t1.task_id),
                  expected_metric_value=cpu_usage)
    assert_metric(got_metrics,
                  'cpu_usage',
                  dict(task_id=t2.task_id),
                  expected_metric_value=cpu_usage)

    # Test whether the application and application_version_name labels were
    # properly generated by the default runner._task_label_generators defined
    # in the MeasurementRunner constructor.
    assert_metric(got_metrics, 'cpu_usage', {
        'application': t1.name,
        'application_version_name': ''
    })

    # Test whether `initial_task_cpu_assignment` label is attached to task metrics.
    assert_metric(got_metrics, 'cpu_usage',
                  {'initial_task_cpu_assignment': '8.0'})
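
The assertions above use the assert_metric helper from the project's test
utilities. A minimal sketch of what such a helper might look like, with the
signature inferred from its usage here rather than copied from the wca source:

def assert_metric(got_metrics, expected_name,
                  expected_labels=None, expected_metric_value=None):
    # Look for a metric with the expected name whose labels contain the
    # expected subset; optionally compare its value as well.
    for metric in got_metrics:
        if metric.name != expected_name:
            continue
        if expected_labels is not None and any(
                metric.labels.get(k) != v for k, v in expected_labels.items()):
            continue
        if expected_metric_value is not None:
            assert metric.value == expected_metric_value, \
                'unexpected value for %r' % expected_name
        return
    assert False, 'metric %r with matching labels not found' % expected_name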


@patch('time.sleep')  # injected below as sleep_mock
def test_measurements_wait(sleep_mock):
    with patch('time.time', return_value=1):
        runner = MeasurementRunner(node=Mock(spec=MesosNode,
                                             get_tasks=Mock(return_value=[])),
                                   metrics_storage=Mock(spec=storage.Storage,
                                                        store=Mock()),
                                   rdt_enabled=False,
                                   extra_labels={})

        runner._initialize()
        runner._iterate()
        sleep_mock.assert_called_once_with(1.0)

    with patch('time.time', return_value=1.3):
        runner._iterate()
        sleep_mock.assert_called_with(0.7)
        assert runner._last_iteration == 1.3

    with patch('time.time', return_value=2.5):
        runner._iterate()
        sleep_mock.assert_called_with(0)
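
The three assertions above pin down the runner's pacing: _wait sleeps for
whatever remains of the (default 1 s) interval, and for zero seconds once an
iteration overruns it. A standalone sketch of that timing logic, written here
as an illustration of what the test exercises rather than as the wca
implementation:

import time

class RunnerTimingSketch:
    def __init__(self, interval=1.0):
        self._interval = interval
        self._last_iteration = time.time()  # seeded at initialization

    def _wait(self):
        now = time.time()
        elapsed = now - self._last_iteration
        self._last_iteration = now
        # Sleep for the remainder of the interval, clamped at zero when the
        # iteration overran it (the time=2.5 case above yields sleep(0)).
        time.sleep(max(0, self._interval - elapsed))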

Example #3

def test_measurements_runner(subcgroups):
    # Mocked node returning two Redis tasks (see the parametrization sketch
    # after this example for how 'subcgroups' is supplied).
    t1 = redis_task_with_default_labels('t1', subcgroups)
    t2 = redis_task_with_default_labels('t2', subcgroups)

    runner = MeasurementRunner(
        node=Mock(spec=MesosNode, get_tasks=Mock(return_value=[t1, t2])),
        metrics_storage=Mock(spec=storage.Storage, store=Mock()),
        rdt_enabled=False,
        gather_hw_mm_topology=False,
        extra_labels=dict(extra_label='extra_value'))
    # Mock _wait so a single iteration can be driven by hand below.
    runner._wait = Mock()
    runner._initialize()
    runner._iterate()

    # Check output metrics.
    got_metrics = runner._metrics_storage.store.call_args[0][0]

    # Internal wca metrics are generated (wca is running, number of tasks
    # under control, memory usage and profiling information).
    assert_metric(got_metrics, MetricName.WCA_UP,
                  dict(extra_label='extra_value'))
    assert_metric(got_metrics, MetricName.WCA_TASKS, expected_metric_value=2)
    # wca & its children memory usage (in bytes)
    assert_metric(got_metrics,
                  MetricName.WCA_MEM_USAGE_BYTES,
                  expected_metric_value=WCA_MEMORY_USAGE * 2 * 1024)

    # Task measurement metrics, based on the get_measurements mocks.
    cpu_usage = TASK_CPU_USAGE * (len(subcgroups) if subcgroups else 1)
    assert_metric(got_metrics,
                  MetricName.TASK_CPU_USAGE_SECONDS,
                  dict(task_id=t1.task_id),
                  expected_metric_value=cpu_usage)
    assert_metric(got_metrics,
                  MetricName.TASK_CPU_USAGE_SECONDS,
                  dict(task_id=t2.task_id),
                  expected_metric_value=cpu_usage)
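
Both runner tests take subcgroups as a pytest parameter. The original
parametrization is not shown on this page; a plausible, purely hypothetical
decoration would cover the flat case and the per-subcgroup case (note how
cpu_usage above is multiplied by len(subcgroups) when subcgroups is
non-empty):

import pytest

@pytest.mark.parametrize('subcgroups', [
    [],                      # no subcgroups: one cgroup per task
    ['/t1/c1', '/t1/c2'],    # hypothetical subcgroup paths
])
def test_measurements_runner(subcgroups):
    ...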

Example #4

def test_measurements_runner_init_and_checks(rdt_enabled, resctrl_available,
                                             monitoring_available, access_ok,
                                             ok):
    # Build the runner with rdt_enabled taken from the test parameters
    # (a value of None would leave RDT support to auto-detection).
    runner = MeasurementRunner(
        node=Mock(spec=MesosNode),
        metrics_storage=Mock(spec=storage.Storage),
        rdt_enabled=rdt_enabled,
    )

    platform_mock = Mock(rdt_information=Mock(is_monitoring_enabled=Mock(
        return_value=monitoring_available)))

    with patch('wca.resctrl.check_resctrl', return_value=resctrl_available), \
            patch('wca.security.are_privileges_sufficient', return_value=access_ok), \
            patch('wca.platforms.collect_platform_information',
                  return_value=(platform_mock, None, None)):
        if ok:
            # All required checks passed: _initialize returns None.
            assert runner._initialize() is None
        else:
            # Some check failed: _initialize signals the error by returning 1.
            assert runner._initialize() == 1
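
The five arguments are again pytest parameters. The real truth table lives in
the original test module; the following parametrization is an assumption,
built on the premise that initialization succeeds only when privileges are
sufficient and, if RDT is enabled, resctrl and RDT monitoring are available:

import pytest

@pytest.mark.parametrize(
    'rdt_enabled, resctrl_available, monitoring_available, access_ok, ok', [
        (False, False, False, True, True),    # RDT off: only privileges matter
        (False, False, False, False, False),  # insufficient privileges
        (True, True, True, True, True),       # RDT on, everything available
        (True, False, True, True, False),     # resctrl filesystem missing
        (True, True, False, True, False),     # RDT monitoring unavailable
    ])
def test_measurements_runner_init_and_checks(rdt_enabled, resctrl_available,
                                             monitoring_available, access_ok,
                                             ok):
    ...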