def test_allocations_generate_metrics(tasks_allocations, expected_metrics):
    """Verify that metrics generated from task allocations match expectations."""
    # Two tasks, each backed by its own container (test helpers from this module).
    containers = {task(path): container(path) for path in ('/t1', '/t2')}

    # Enable memory-bandwidth control on the shared platform mock so that
    # MB-related allocation values pass validation.
    platform_mock.rdt_information.rdt_mb_control_enabled = True

    values = TasksAllocationsValues.create(
        True, tasks_allocations, containers, platform_mock)
    values.validate()

    assert values.generate_metrics() == expected_metrics
def test_convert_invalid_task_allocations(tasks_allocations, expected_error):
    """After conversion, improper allocation input must raise
    InvalidAllocations with a message matching ``expected_error``."""
    # Three tasks/containers, enough to cover the parametrized invalid inputs.
    containers = {task(path): container(path) for path in ('/t1', '/t2', '/t3')}

    with pytest.raises(InvalidAllocations, match=expected_error):
        values = TasksAllocationsValues.create(
            True, tasks_allocations, containers, platform_mock)
        values.validate()
def test_unique_rdt_allocations(tasks_allocations, expected_resgroup_reallocation_count):
    """Allocation of a resctrl group must be performed only once when several
    task_allocations carry an RDTAllocation with the same name — i.e. no
    unnecessary reallocation of the resctrl group takes place.

    Verified by counting calls to ResGroup.write_schemata while performing
    the allocations.
    """
    containers = {
        task(path): container(path, resgroup_name='', with_config=True)
        for path in ('/t1', '/t2')
    }
    values = TasksAllocationsValues.create(
        True, tasks_allocations, containers, platform_mock)
    values.validate()

    # Patch out cgroup I/O so only the resctrl schemata writes are observed.
    with patch('wca.resctrl.ResGroup.write_schemata') as write_schemata_mock, \
            patch('wca.cgroups.Cgroup._write'), \
            patch('wca.cgroups.Cgroup._read'):
        values.perform_allocations()
        assert write_schemata_mock.call_count == expected_resgroup_reallocation_count
def test_prepare_task_data_cgroup_not_found(*mocks):
    # Missing cgroup (simulated via the patch decorators feeding *mocks)
    # results in no measurements being collected for the task.
    #
    # NOTE(review): this function shares its name with other tests in this
    # module; pytest collects only the last definition with a given name, so
    # this earlier variant is shadowed and never runs — consider renaming.
    containers = {
        task('/t1', labels={'label_key': 'label_value'}, resources={'cpu': 3}):
            Container('/t1', platform_mock)
    }
    tasks_measurements, tasks_resources, tasks_labels = \
        _prepare_tasks_data(containers)
    assert tasks_measurements == {}
def test_prepare_task_data_cgroup_not_found(*mocks):
    """When the task's cgroup cannot be read, _prepare_tasks_data must raise
    MissingMeasurementException rather than return partial data.

    NOTE(review): this function shares its name with other tests in this
    module; pytest collects only the last definition with a given name, so
    duplicates are silently shadowed — consider renaming.
    """
    containers = {
        task('/t1', labels={'label_key': 'label_value'}, resources={'cpu': 3}):
            Container('/t1', platform_mock)
    }
    with pytest.raises(MissingMeasurementException):
        # The call must raise before producing a result, so binding the
        # returned tuple to names (as the original did) was dead code.
        _prepare_tasks_data(containers)
def test_prepare_task_data_cgroup_not_found(*mocks):
    """A task whose cgroup cannot be found yields no measurements."""
    rdt_information = RDTInformation(True, True, True, True, '0', '0', 0, 0, 0)
    single_task = task('/t1', labels={'label_key': 'label_value'},
                       resources={'cpu': 3})
    single_container = Container('/t1', 1, 1, rdt_information)
    containers = {single_task: single_container}

    tasks_measurements, tasks_resources, tasks_labels = _prepare_tasks_data(containers)

    assert tasks_measurements == {}
def test_get_tasks_allocations_fail(*mock):
    # When reading allocations fails (failure simulated via the patch
    # decorators feeding *mock), _get_tasks_allocations returns an empty dict.
    #
    # NOTE(review): this function shares its name with another test in this
    # module; pytest collects only the last definition with a given name, so
    # this earlier variant is shadowed and never runs — consider renaming.
    containers = {
        task('/t1', labels={'label_key': 'label_value'}, resources={'cpu': 3}):
            Container('/t1', platform_mock,
                      allocation_configuration=AllocationConfiguration(
                          cpu_quota_period=1000))
    }
    assert {} == _get_tasks_allocations(containers)
def test_get_tasks_allocations_fail(*mock):
    """On failure to read allocations, _get_tasks_allocations yields an empty dict."""
    rdt_information = RDTInformation(True, True, True, True, '0', '0', 0, 0, 0)
    allocation_configuration = AllocationConfiguration(cpu_quota_period=1000)

    the_task = task('/t1', labels={'label_key': 'label_value'},
                    resources={'cpu': 3})
    the_container = Container('/t1', 1, 1, rdt_information,
                              allocation_configuration=allocation_configuration)

    assert _get_tasks_allocations({the_task: the_container}) == {}
def test_append_additional_labels_to_tasks__overwriting_label(log_mock):
    """Should not overwrite an already existing label."""
    task1 = task('/t1', labels={'source_key': '__val__'})
    append_additional_labels_to_tasks(
        {
            'source_key': TaskLabelRegexGenerator('__(.*)__', '\\1', 'non_existing_key')
        },
        [task1])
    # The pre-existing label value must be preserved, not replaced by the
    # generator's output.
    assert task1.labels['source_key'] == '__val__'
    # Exactly one debug message is expected for the skipped label.
    log_mock.debug.assert_called_once()
def test_append_additional_labels_to_tasks__generate_returns_None(log_mock):
    """A generator whose generate() returns None triggers a single debug log."""
    class NoneLabelGenerator(TaskLabelGenerator):
        # Always declines to produce a label value.
        def generate(self, task):
            return None

    task1 = task('/t1', labels={'source_key': 'source_val'})
    append_additional_labels_to_tasks({'target_key': NoneLabelGenerator()},
                                      [task1])
    log_mock.debug.assert_called_once()
def test_prepare_tasks_data(*mocks):
    # Happy path: one task with labels and resources produces matching
    # measurements, resources and labels keyed by its task id.
    #
    # NOTE(review): this function shares its name with other tests in this
    # module; pytest collects only the last definition with a given name, so
    # this earlier variant is shadowed and never runs — consider renaming.
    containers = {
        task('/t1', labels={'label_key': 'label_value'}, resources={'cpu': 3}):
            Container('/t1', platform_mock)
    }
    tasks_measurements, tasks_resources, tasks_labels = _prepare_tasks_data(
        containers)
    # cpu_usage value (13) comes from the mocked measurement source.
    assert tasks_measurements == {'t1_task_id': {'cpu_usage': 13}}
    assert tasks_resources == {'t1_task_id': {'cpu': 3}}
    assert tasks_labels == {'t1_task_id': {'label_key': 'label_value'}}
def test_prepare_tasks_data(*mocks):
    """One task with labels/resources yields matching per-task-id dicts."""
    rdt_information = RDTInformation(True, True, True, True, '0', '0', 0, 0, 0)
    single_task = task('/t1', labels={'label_key': 'label_value'},
                       resources={'cpu': 3})
    containers = {single_task: Container('/t1', 1, 1, rdt_information)}

    measurements, resources, labels = _prepare_tasks_data(containers)

    assert measurements == {'t1_task_id': {'cpu_usage': 13}}
    assert resources == {'t1_task_id': {'cpu': 3}}
    assert labels == {'t1_task_id': {'label_key': 'label_value'}}
def test_prepare_tasks_data(*mocks):
    # Happy path: _prepare_tasks_data returns a dict of TaskData keyed by
    # task id, carrying the task's identity fields plus mocked measurements.
    t = task('/t1', labels={'label_key': 'label_value'}, resources={'cpu': 3})
    containers = {t: Container('/t1', platform_mock)}
    tasks_data = _prepare_tasks_data(containers)
    # Measurement values (task_last_seen=12345.6, cpu usage=13) come from
    # the mocked measurement sources set up by the test decorators.
    assert tasks_data == {
        't1_task_id': TaskData(t.name, t.task_id, t.cgroup_path, t.subcgroups_paths,
                               t.labels, t.resources, {
                                   'task_up': 1,
                                   'task_last_seen': 12345.6,
                                   'task_cpu_usage_seconds': 13
                               })
    }
def test_task_label_regex_generator(source_val, pattern, repl, expected_val):
    """generate() applies pattern/repl to the 'source_key' label value."""
    generator = TaskLabelRegexGenerator(pattern, repl, 'source_key')
    labeled_task = task('/t1', labels={'source_key': source_val})
    assert generator.generate(labeled_task) == expected_val
def test_sync_containers_state(_, get_pids_mock, sync_mock, perf_counters_mock,
                               add_pids_mock, clean_taskless_groups_mock,
                               subcgroups,
                               tasks_, pre_running_containers_,
                               mon_groups_relation,
                               expected_running_containers_,
                               labels_relation_, pre_running_labels_relation_):
    """Tests both Container and ContainerSet classes.

    Note: the input arguments tasks_, existing_containers_,
    expected_running_containers_ contain in their names underscore at the end
    to distinguish them from the ones created inside the function body to
    emphasize the relationship: the input arguments are used to create real
    objects. We cannot pass already created objects, as to create them we
    need another argument from the first of two parametrize decorators:
    subcgroups.

    Note: we have three variables names with the same postfix:
    * pre_running_containers - state of ContainerManager before (pre) call
      sync_containers_state
    * expected_running_containers - similar as above but state expected after
      the call,
    * got_running_containers - similar as above but state which we got after
      the call.
    All of three are of the same type Dict[Task, ContainerInterface].
    """
    # Create Task and Container/ContainerSet objects from input arguments.
    # This is done to both test Container and ContainerSet classes (to pass
    # subcgroups argument into the constructing function >>container<<).
    tasks = [task(t, subcgroups_paths=subcgroups, labels=labels_relation_.get(t))
             for t in tasks_]
    pre_running_containers = \
        {task(t, subcgroups_paths=subcgroups, labels=pre_running_labels_relation_.get(t)):
         container(c, subcgroups)
         for t, c in pre_running_containers_.items()}
    expected_running_containers = \
        {task(t, subcgroups_paths=subcgroups, labels=labels_relation_.get(t)):
         container(c, subcgroups)
         for t, c in expected_running_containers_.items()}

    # Minimal platform stand-in with RDT fully enabled ('fff' cache mask,
    # '1' MB value).
    rdt_information = RDTInformation(True, True, True, True, 'fff', '1', 0, 0, 0)
    platform_mock = Mock(
        spec=Platform, sockets=1, cores=1, cpus=1, rdt_information=rdt_information)

    containers_manager = ContainerManager(platform=platform_mock,
                                          allocation_configuration=AllocationConfiguration(),
                                          event_names=[],
                                          )
    # Put into ContainerManager our input dict of containers
    # (copied, so the test's reference stays untouched by the sync).
    containers_manager.containers = dict(pre_running_containers)

    # Call sync_containers_state with the monitoring-groups relation patched
    # to the parametrized value.
    with patch('wca.resctrl.read_mon_groups_relation', return_value=mon_groups_relation):
        got_running_containers = containers_manager.sync_containers_state(tasks)

    # -----------------------
    # Assert that two sets of keys of two dictionaries got_containers and
    # expected_running_containers are equal.
    assert len(got_running_containers) == len(expected_running_containers)
    assert all([expected_task in got_running_containers
                for expected_task in expected_running_containers.keys()])
    for t in expected_running_containers.keys():
        assert_equal_containers(expected_running_containers[t],
                                got_running_containers[t])

    # Check container objects have the proper resgroup assigned, according
    # to the (patched) monitoring-groups relation.
    got_container_resgroup_names = {c.get_name():
                                    c.get_resgroup().name
                                    for c in got_running_containers.values()}
    for expected_resgroup_name, container_names in mon_groups_relation.items():
        for container_name in container_names:
            if container_name in got_container_resgroup_names:
                got_resgroup_name = got_container_resgroup_names.get(container_name)
                assert got_resgroup_name == expected_resgroup_name
IMPORTANT: two containers are HERE treated as equal if they have the same cgroup_path set. One special assumption is made about private attribute of ContainerSet: that field _subcontainers exists with type Dict[str, Container].""" assert len(containers_a) == len(containers_b) for a, b in zip(containers_a, containers_b): assert_equal_containers(a, b) @pytest.mark.parametrize( 'discovered_tasks, containers, ' 'expected_new_tasks, expected_containers_to_delete', ( # 1) One new task (t1) was discovered - before there was no containers. ([task('/t1')], [], [task('/t1')], []), # 2) No changes in environment - no actions are expected. ([task('/t1')], [container('/t1')], [], []), # 3) One new task (t2) was discovered. ([task('/t1'), task('/t2')], [container('/t1')], [task('/t2')], []), # 4) No changes in environment - no actions are expected # (but now two containers already are running). ([task('/t1'), task('/t2')], [container('/t1'), container('/t2')], [], []), # 5) First task just disappeared - corresponding container should be removed. ([task('/t2')], [container('/t1'), container('/t2')], [], [container('/t1')]), # 6) Two new task were discovered.