def test_present_version_update(self):
    """An older version (2.5.0) is installed; requesting 2.5.1 must install it."""
    set_module_args({
        'name': 'coffee-script',
        'global': 'true',
        'state': 'present',
        'version': '2.5.1',
    })
    # First command: `npm list` reports 2.5.0; second: `npm install` succeeds.
    self.module_main_command.side_effect = [
        (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''),
        (0, '{}', ''),
    ]

    result = self.module_main(AnsibleExitJson)

    self.assertTrue(result['changed'])
    expected_calls = [
        call(['/testbin/npm', 'list', '--json', '--long', '--global'],
             check_rc=False, cwd=None),
        call(['/testbin/npm', 'install', '--global', '[email protected]'],
             check_rc=True, cwd=None),
    ]
    self.module_main_command.assert_has_calls(expected_calls)
def test_iterate_over_pageable_resource_should_pass_with_string_offset_and_limit(
        self):
    """String-typed offset/limit must be accepted and advanced numerically."""
    pages = [
        {'items': ['foo']},
        {'items': []},
    ]
    resource_func = mock.Mock(side_effect=pages)

    items = iterate_over_pageable_resource(
        resource_func, {'query_params': {'offset': '1', 'limit': '1'}})

    assert ['foo'] == list(items)
    # First call forwards the original string params; the follow-up call
    # carries the computed integer offset (1 + 1 item).
    resource_func.assert_has_calls([
        call(params={'query_params': {'offset': '1', 'limit': '1'}}),
        call(params={'query_params': {'offset': 2, 'limit': '1'}}),
    ])
def test_edit_upserted_object(self, _set_default_mock, copy_properties_mock, edit_object_mock, get_operation_mock):
    """_edit_upserted_object sets defaults, copies properties, then edits."""
    model_operations = mock.MagicMock()
    existing_object = mock.MagicMock()
    params = {'path_params': {}, 'data': {}}

    result = self._resource._edit_upserted_object(
        model_operations, existing_object, params)

    assert result == edit_object_mock.return_value
    # Defaults must be applied for both 'path_params' and 'data'.
    _set_default_mock.assert_has_calls([
        mock.call(params, 'path_params', {}),
        mock.call(params, 'data', {}),
    ])
    get_operation_mock.assert_called_once_with(
        self._resource._operation_checker.is_edit_operation,
        model_operations)
    copy_properties_mock.assert_called_once_with(
        existing_object, params['data'])
    edit_object_mock.assert_called_once_with(
        get_operation_mock.return_value, params)
def test_get_resources_with_same_credential_names(self, client_mock, display_mock):
    """When two resources expose the same credential keys, the later resource's
    values win and a warning is emitted for each replaced key."""
    expected_result = [{
        'RESOURCE_TOKEN_1': 'token-5',
        'RESOURCE_TOKEN_2': 'token-6'
    }]
    # Canned API responses for the project's resource list, the project
    # lookup by label, and the per-resource credential queries.
    client_mock.return_value.get_resources.return_value = API_FIXTURES[
        'https://api.marketplace.manifold.co/v1/resources?project_id=pid-2']
    client_mock.return_value.get_projects.return_value = API_FIXTURES[
        'https://api.marketplace.manifold.co/v1/projects?label=project-2']
    client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES[
        'https://api.marketplace.manifold.co/v1/'
        'credentials?resource_id={0}'.format(x)]
    self.assertListEqual(
        expected_result,
        self.lookup.run([], api_token='token-123', project='project-2'))
    client_mock.assert_called_with('token-123')
    # Both duplicated keys must be reported as replaced; warning order is
    # not guaranteed, hence any_order=True.
    display_mock.warning.assert_has_calls([
        call(
            "'RESOURCE_TOKEN_1' with label 'resource-1' was replaced by resource data with label 'resource-3'"
        ),
        call(
            "'RESOURCE_TOKEN_2' with label 'resource-1' was replaced by resource data with label 'resource-3'"
        )
    ], any_order=True)
    client_mock.return_value.get_resources.assert_called_with(
        team_id=None, project_id='pid-2')
def test_change_flag(self):
    """Setting multiple partition flags issues one parted command whose flag
    order is implementation-defined."""
    # Flags are set in a second run of parted().
    # Between the two runs, the partition dict is updated.
    # use checkmode here allow us to continue even if the dictionary is
    # not updated.
    set_module_args({
        'device': '/dev/sdb',
        'number': 3,
        'state': 'present',
        'flags': ['lvm', 'boot'],
        '_ansible_check_mode': True,
    })

    with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
        self.parted.reset_mock()
        self.execute_module(changed=True)

    # When using multiple flags:
    # order of execution is non deterministic, because set() operations are used in
    # the current implementation.
    expected_calls_order1 = [call('unit KiB set 3 lvm on set 3 boot on ', '/dev/sdb', 'optimal')]
    expected_calls_order2 = [call('unit KiB set 3 boot on set 3 lvm on ', '/dev/sdb', 'optimal')]
    # Accept either flag ordering.
    self.assertTrue(self.parted.mock_calls == expected_calls_order1 or
                    self.parted.mock_calls == expected_calls_order2)
def test_iterate_over_pageable_resource_raises_exception_when_server_returned_more_items_than_requested(
        self):
    """A page larger than the requested limit must raise FtdUnexpectedResponse."""
    # Server misbehaves: two items returned although limit was '1'.
    resource_func = mock.Mock(side_effect=[
        {'items': ['foo', 'redundant_bar']},
        {'items': []},
    ])

    with pytest.raises(FtdUnexpectedResponse):
        generator = iterate_over_pageable_resource(
            resource_func, {'query_params': {'offset': '1', 'limit': '1'}})
        list(generator)

    resource_func.assert_has_calls([
        call(params={'query_params': {'offset': '1', 'limit': '1'}}),
    ])
def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
    """main() must construct AnsibleModule with exactly this argument spec
    and with check mode disabled."""
    expected_arguments_spec = dict(
        login_user=dict(required=True),
        login_password=dict(required=True, no_log=True),
        command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
                                                       'nodes_by_chassis_uuid',
                                                       'nodes_status_managed',
                                                       'nodes_status_unmanaged']),
        auth_url=dict(required=True),
        uuid=dict(default=None),
        chassis=dict(default=None),
    )
    _setup_conn.return_value = "Fake connection"
    _execute_module.return_value = []
    mod_obj = ansible_mod_cls.return_value
    # Minimal valid parameter set so main() reaches module construction.
    args = {
        "auth_url": "https://10.243.30.195",
        "login_user": "******",
        "login_password": "******",
        "command_options": "nodes",
    }
    mod_obj.params = args
    lxca_nodes.main()
    assert(mock.call(argument_spec=expected_arguments_spec,
                     supports_check_mode=False) == ansible_mod_cls.call_args)
def test_present(self):
    """Package not yet installed: module installs it and reports changed."""
    set_module_args({
        'name': 'coffee-script',
        'global': 'true',
        'state': 'present',
    })
    # `npm list` sees nothing installed, then `npm install` succeeds.
    self.module_main_command.side_effect = [
        (0, '{}', ''),
        (0, '{}', ''),
    ]

    result = self.module_main(AnsibleExitJson)

    self.assertTrue(result['changed'])
    expected_calls = [
        call(['/testbin/npm', 'list', '--json', '--long', '--global'],
             check_rc=False, cwd=None),
        call(['/testbin/npm', 'install', '--global', 'coffee-script'],
             check_rc=True, cwd=None),
    ]
    self.module_main_command.assert_has_calls(expected_calls)
def test_release_unset(self):
    """A release is currently set but the user wants none: the module must
    run `release --unset` and report a change."""
    set_module_args({'release': None})
    self.module_main_command.side_effect = [
        (0, '7.5', ''),  # get_release: a release is currently pinned
        (0, '', ''),     # set_release (--unset): exits cleanly
    ]

    result = self.module_main(AnsibleExitJson)

    self.assertTrue(result['changed'])
    self.assertIsNone(result['current_release'])
    expected_calls = [
        call('/testbin/subscription-manager release --show', check_rc=True),
        call('/testbin/subscription-manager release --unset', check_rc=True),
    ]
    self.module_main_command.assert_has_calls(expected_calls)
def test_absent(self):
    """Removing an installed package reports changed and runs rpm-ostree uninstall."""
    set_module_args({'name': 'nfs-utils', 'state': 'absent'})
    self.module_main_command.side_effect = [(0, '', '')]

    result = self.module_main(AnsibleExitJson)

    self.assertTrue(result['changed'])
    self.assertEqual(['nfs-utils'], result['packages'])
    uninstall_cmd = [
        '/testbin/rpm-ostree', 'uninstall', '--allow-inactive',
        '--idempotent', '--unchanged-exit-77', 'nfs-utils'
    ]
    self.module_main_command.assert_has_calls([call(uninstall_cmd)])
def test_ensure_feature_is_enabled_called(self):
    """With state=present the module must enable the 'CS' (content switching)
    NetScaler feature on the nitro client."""
    self.set_module_state('present')
    from ansible_collections.community.general.plugins.modules.network.netscaler import netscaler_cs_policy
    client_mock = Mock()
    ensure_feature_is_enabled_mock = Mock()
    # Stub out every collaborator so only ensure_feature_is_enabled is observed;
    # policy_exists answers True for both existence checks.
    with patch.multiple(
        'ansible_collections.community.general.plugins.modules.network.netscaler.netscaler_cs_policy',
        get_nitro_client=Mock(return_value=client_mock),
        policy_exists=Mock(side_effect=[True, True]),
        nitro_exception=self.MockException,
        ensure_feature_is_enabled=ensure_feature_is_enabled_mock,
    ):
        self.module = netscaler_cs_policy
        result = self.exited()
        ensure_feature_is_enabled_mock.assert_has_calls([call(client_mock, 'CS')])
def test__nodes_empty_list(self, ansible_mod_cls, _get_nodes, _setup_conn, setup_module):
    """An empty node list from _get_nodes is returned unchanged."""
    # NOTE(review): _get_nodes is itself a mock here, so this test verifies the
    # stubbed call/return round-trip rather than real module logic — confirm
    # this is intentional.
    mod_obj = ansible_mod_cls.return_value
    args = {
        "auth_url": "https://10.243.30.195",
        "login_user": "******",
        "login_password": "******",
        "uuid": "3C737AA5E31640CE949B10C129A8B01F",
        "command_options": "nodes_by_uuid",
    }
    mod_obj.params = args
    _setup_conn.return_value = "Fake connection"
    empty_nodes_list = []
    _get_nodes.return_value = empty_nodes_list
    ret_nodes = _get_nodes(mod_obj, args)
    assert mock.call(mod_obj, mod_obj.params) == _get_nodes.call_args
    assert _get_nodes.return_value == ret_nodes
def test_release_unset_idempotent(self):
    """No release set and none requested: nothing happens, not changed."""
    set_module_args({'release': None})
    # get_release reports nothing pinned, so set_release is never invoked.
    self.module_main_command.side_effect = [(0, 'Release not set', '')]

    result = self.module_main(AnsibleExitJson)

    self.assertFalse(result['changed'])
    self.assertIsNone(result['current_release'])
    self.module_main_command.assert_has_calls([
        call('/testbin/subscription-manager release --show', check_rc=True),
    ])
def test_release_set_idempotent(self):
    """Current release already matches the requested one: no change is made."""
    set_module_args({'release': '7.5'})
    # get_release returns the same version, so set_release is never invoked.
    self.module_main_command.side_effect = [(0, '7.5', '')]

    result = self.module_main(AnsibleExitJson)

    self.assertFalse(result['changed'])
    self.assertEqual('7.5', result['current_release'])
    self.module_main_command.assert_has_calls([
        call('/testbin/subscription-manager release --show', check_rc=True),
    ])
def test_present_failed(self):
    """A failing rpm-ostree install surfaces rc=1 and fails the module run."""
    set_module_args({'name': 'nfs-utils', 'state': 'present'})
    self.module_main_command.side_effect = [(1, '', '')]

    result = self.module_main(AnsibleFailJson)

    self.assertFalse(result['changed'])
    self.assertEqual(1, result['rc'])
    self.assertEqual(['nfs-utils'], result['packages'])
    install_cmd = [
        '/testbin/rpm-ostree', 'install', '--allow-inactive',
        '--idempotent', '--unchanged-exit-77', 'nfs-utils'
    ]
    self.module_main_command.assert_has_calls([call(install_cmd)])
def _test_base(self, module_args, return_value, mock_calls, changed):
    """Base function that's called by all the other test functions

    module_args (dict):
        Arguments passed to the module

    return_value (dict):
        Mocked return value of PwPolicyIPAClient.pwpolicy_find, as returned
        by the IPA API. This should be set to the current state. It will be
        changed to the desired state using the above arguments.
        (Technically, this is the return value of _post_json, but it's only
        checked by pwpolicy_find).
        An empty dict means that the policy doesn't exist.

    mock_calls (list/tuple of dicts):
        List of calls made to PwPolicyIPAClient._post_json, in order.
        _post_json is called by all of the pwpolicy_* methods of the class.
        Pass an empty list if no calls are expected.

    changed (bool):
        Whether or not the module is supposed to be marked as changed
    """
    set_module_args(module_args)

    # Run the module
    with patch_ipa(return_value=return_value) as (mock_login, mock_post):
        with self.assertRaises(AnsibleExitJson) as exec_info:
            self.module.main()

    # Verify that the calls to _post_json match what is expected
    expected_call_count = len(mock_calls)
    if expected_call_count > 1:
        # Convert the call dicts to unittest.mock.call instances because
        # `assert_has_calls` only accepts them
        converted_calls = []
        for call_dict in mock_calls:
            converted_calls.append(call(**call_dict))

        mock_post.assert_has_calls(converted_calls)
        # assert_has_calls only checks a subsequence; also pin the exact count
        # so no extra calls slip through.
        self.assertEqual(len(mock_post.mock_calls), expected_call_count)
    elif expected_call_count == 1:
        mock_post.assert_called_once_with(**mock_calls[0])
    else:  # expected_call_count is 0
        mock_post.assert_not_called()

    # Verify that the module's changed status matches what is expected
    self.assertIs(exec_info.exception.args[0]['changed'], changed)
def test_iterate_over_pageable_resource_with_one_page(self):
    """A single page is yielded in full, queried with default offset/limit."""
    pages = [
        {'items': ['foo', 'bar']},
        {'items': []},
    ]
    resource_func = mock.Mock(side_effect=pages)

    items = iterate_over_pageable_resource(resource_func,
                                           {'query_params': {}})

    assert ['foo', 'bar'] == list(items)
    resource_func.assert_has_calls([
        call(params={'query_params': {'offset': 0, 'limit': 10}}),
    ])
def test_update_db(self, module_args, expected_calls, changed):
    """Parametrized cache refresh: feed recorded rc/stdout/stderr per expected
    command and verify both the command sequence and the changed flags."""
    args = {"update_cache": True}
    args.update(module_args)
    set_module_args(args)
    # Replay the canned results in the order the commands are expected.
    self.mock_run_command.side_effect = [
        (rc, out, err) for _cmd, _kwargs, rc, out, err in expected_calls
    ]

    with pytest.raises(AnsibleExitJson) as exc:
        pacman.Pacman(pacman.setup_module()).run()

    self.mock_run_command.assert_has_calls([
        mock.call(mock.ANY, cmd, **kwargs)
        for cmd, kwargs, _rc, _out, _err in expected_calls
    ])
    result = exc.value.args[0]
    assert result["cache_updated"] == changed
    assert result["changed"] == changed
def test_iterate_over_pageable_resource_should_preserve_limit(self):
    """A caller-supplied limit must be forwarded unchanged to the resource."""
    pages = [
        {'items': ['foo']},
        {'items': []},
    ]
    resource_func = mock.Mock(side_effect=pages)

    items = iterate_over_pageable_resource(
        resource_func, {'query_params': {'limit': 1}})

    assert ['foo'] == list(items)
    resource_func.assert_has_calls([
        call(params={'query_params': {'offset': 0, 'limit': 1}}),
    ])
def test_get_objects_by_filter_with_multiple_responses(
        self, send_request_mock, fetch_system_info_mock, connection_mock):
    """get_objects_by_filter must page through multiple server responses and
    apply client-side filtering, forwarding limit/offset query params."""
    # Three pages: two objects, one object, then an empty page that stops paging.
    send_request_mock.side_effect = [{
        'items': [{
            'name': 'obj1',
            'type': 'foo'
        }, {
            'name': 'obj2',
            'type': 'bar'
        }]
    }, {
        'items': [{
            'name': 'obj3',
            'type': 'foo'
        }]
    }, {
        'items': []
    }]
    fetch_system_info_mock.return_value = {
        'databaseInfo': {
            'buildVersion': '6.3.0'
        }
    }
    connection_mock.get_operation_spec.return_value = {
        'method': HTTPMethod.GET,
        'url': '/object/'
    }
    resource = BaseConfigurationResource(connection_mock, False)

    # Default query params (limit 10, offset 0): only type 'foo' survives the
    # filter, and paging stops after the first page yields matches.
    assert [{
        'name': 'obj1',
        'type': 'foo'
    }] == list(
        resource.get_objects_by_filter(
            'test', {ParamName.FILTERS: {
                'type': 'foo'
            }}))
    send_request_mock.assert_has_calls(
        [mock.call('/object/', 'get', {}, {}, {
            'limit': 10,
            'offset': 0
        })])

    send_request_mock.reset_mock()
    # Same pages again, but now with an explicit limit of 2: the resource must
    # request successive pages (offset 0 then 2) and accumulate all matches.
    send_request_mock.side_effect = [{
        'items': [{
            'name': 'obj1',
            'type': 'foo'
        }, {
            'name': 'obj2',
            'type': 'bar'
        }]
    }, {
        'items': [{
            'name': 'obj3',
            'type': 'foo'
        }]
    }, {
        'items': []
    }]
    resp = list(
        resource.get_objects_by_filter(
            'test', {
                ParamName.FILTERS: {
                    'type': 'foo'
                },
                ParamName.QUERY_PARAMS: {
                    'limit': 2
                }
            }))
    assert [{
        'name': 'obj1',
        'type': 'foo'
    }, {
        'name': 'obj3',
        'type': 'foo'
    }] == resp
    send_request_mock.assert_has_calls([
        mock.call('/object/', 'get', {}, {}, {
            'limit': 2,
            'offset': 0
        }),
        mock.call('/object/', 'get', {}, {}, {
            'limit': 2,
            'offset': 2
        })
    ])
def test_get_objects_by_filter_with_multiple_filters(
        self, send_request_mock, fetch_system_info_mock, connection_mock):
    """Only the 'name' filter is pushed to the server (QueryParams.FILTER);
    remaining filters — including nested dict equality — are applied client-side."""
    objects = [{
        'name': 'obj1',
        'type': 1,
        'foo': {
            'bar': 'buzz'
        }
    }, {
        'name': 'obj2',
        'type': 1,
        'foo': {
            'bar': 'buz'
        }
    }, {
        'name': 'obj3',
        'type': 2,
        'foo': {
            'bar': 'buzz'
        }
    }]
    fetch_system_info_mock.return_value = {
        'databaseInfo': {
            'buildVersion': '6.3.0'
        }
    }
    connection_mock.get_operation_spec.return_value = {
        'method': HTTPMethod.GET,
        'url': '/object/'
    }
    resource = BaseConfigurationResource(connection_mock, False)

    # No filters: every object is returned.
    send_request_mock.side_effect = [{'items': objects}, {'items': []}]
    # resource.get_objects_by_filter returns generator so to be able compare generated list with expected list
    # we need evaluate it.
    assert objects == list(resource.get_objects_by_filter('test', {}))
    send_request_mock.assert_has_calls(
        [mock.call('/object/', 'get', {}, {}, {
            'limit': 10,
            'offset': 0
        })])

    send_request_mock.reset_mock()
    # Name-only filter: forwarded to the server as 'name:obj1'.
    send_request_mock.side_effect = [{'items': objects}, {'items': []}]
    # resource.get_objects_by_filter returns generator so to be able compare generated list with expected list
    # we need evaluate it.
    assert [objects[0]] == list(
        resource.get_objects_by_filter(
            'test', {ParamName.FILTERS: {
                'name': 'obj1'
            }}))
    send_request_mock.assert_has_calls([
        mock.call('/object/', 'get', {}, {}, {
            QueryParams.FILTER: 'name:obj1',
            'limit': 10,
            'offset': 0
        })
    ])

    send_request_mock.reset_mock()
    # Mixed filters: server sees only 'name:obj2'; type and nested foo.bar
    # equality are checked locally, leaving a single match.
    send_request_mock.side_effect = [{'items': objects}, {'items': []}]
    # resource.get_objects_by_filter returns generator so to be able compare generated list with expected list
    # we need evaluate it.
    assert [objects[1]] == list(
        resource.get_objects_by_filter(
            'test', {
                ParamName.FILTERS: {
                    'name': 'obj2',
                    'type': 1,
                    'foo': {
                        'bar': 'buz'
                    }
                }
            }))
    send_request_mock.assert_has_calls([
        mock.call('/object/', 'get', {}, {}, {
            QueryParams.FILTER: 'name:obj2',
            'limit': 10,
            'offset': 0
        })
    ])
class TestPacman:
    """Tests for the community.general pacman module: inventory building,
    cache refresh, system upgrade, and package install/remove operations.

    All external pacman invocations are intercepted by mocking
    AnsibleModule.run_command.
    """

    @pytest.fixture(autouse=True)
    def run_command(self, mocker):
        # Every test gets run_command mocked so no real pacman is executed.
        self.mock_run_command = mocker.patch.object(basic.AnsibleModule, "run_command", autospec=True)

    @pytest.fixture
    def mock_package_list(self, mocker):
        # Opt-in stub for Pacman.package_list (used by the nothing-to-do tests).
        return mocker.patch.object(pacman.Pacman, "package_list", autospec=True)

    @pytest.fixture(autouse=True)
    def common(self, mocker):
        # Replace module exit/fail/bin-path plumbing with the test doubles.
        self.mock_module = mocker.patch.multiple(
            basic.AnsibleModule,
            exit_json=exit_json,
            fail_json=fail_json,
            get_bin_path=get_bin_path,
        )

    @pytest.fixture
    def mock_empty_inventory(self, mocker):
        inv = empty_inventory
        return mocker.patch.object(pacman.Pacman, "_build_inventory", return_value=inv)

    @pytest.fixture
    def mock_valid_inventory(self, mocker):
        return mocker.patch.object(pacman.Pacman, "_build_inventory", return_value=valid_inventory)

    def test_fail_without_required_args(self):
        # One of name/update_cache/upgrade is mandatory.
        with pytest.raises(AnsibleFailJson) as e:
            set_module_args({})
            pacman.main()
        assert e.match(r"one of the following is required")

    def test_success(self, mock_empty_inventory):
        set_module_args({"update_cache": True})  # Simplest args to let init go through
        P = pacman.Pacman(pacman.setup_module())
        with pytest.raises(AnsibleExitJson) as e:
            P.success()

    def test_fail(self, mock_empty_inventory):
        # fail() must propagate every provided key into the fail-json payload.
        set_module_args({"update_cache": True})
        P = pacman.Pacman(pacman.setup_module())

        args = dict(
            msg="msg", stdout="something", stderr="somethingelse",
            cmd=["command", "with", "args"], rc=1,
        )
        with pytest.raises(AnsibleFailJson) as e:
            P.fail(**args)

        assert all(item in e.value.args[0] for item in args)

    # _build_inventory issues (in order): pacman --query, --query --group,
    # --sync --list, --sync --group --group, --query --upgrades.
    @pytest.mark.parametrize(
        "expected, run_command_side_effect, raises",
        [
            (  # Regular run
                valid_inventory,
                [
                    [  # pacman --query
                        0,
                        """file 5.41-1
filesystem 2021.11.11-1
findutils 4.8.0-1
gawk 5.1.1-1
gettext 0.21-1
grep 3.7-1
gzip 1.11-1
pacman 6.0.1-2
pacman-mirrorlist 20211114-1
sed 4.8-1
sqlite 3.36.0-1
""",
                        "",
                    ],
                    (  # pacman --query --group
                        0,
                        """base-devel file
base-devel findutils
base-devel gawk
base-devel gettext
base-devel grep
base-devel gzip
base-devel pacman
base-devel sed
""",
                        "",
                    ),
                    (  # pacman --sync --list
                        0,
                        """core acl 2.3.1-1 [installed]
core amd-ucode 20211027.1d00989-1
core archlinux-keyring 20211028-1 [installed]
core argon2 20190702-3 [installed]
core attr 2.5.1-1 [installed]
core audit 3.0.6-5 [installed: 3.0.6-2]
core autoconf 2.71-1
core automake 1.16.5-1
core b43-fwcutter 019-3
core gawk 5.1.1-1 [installed]
core grep 3.7-1 [installed]
core sqlite 3.37.0-1 [installed: 3.36.0-1]
code sudo 1.9.8.p2-3
""",
                        "",
                    ),
                    (  # pacman --sync --group --group
                        0,
                        """base-devel autoconf
base-devel automake
base-devel binutils
base-devel bison
base-devel fakeroot
base-devel file
base-devel findutils
base-devel flex
base-devel gawk
base-devel gcc
base-devel gettext
base-devel grep
base-devel groff
base-devel gzip
base-devel libtool
base-devel m4
base-devel make
base-devel pacman
base-devel patch
base-devel pkgconf
base-devel sed
base-devel sudo
base-devel texinfo
base-devel which
some-group libtool
some-group sudo
some-group binutils
""",
                        "",
                    ),
                    (  # pacman --query --upgrades
                        0,
                        """sqlite 3.36.0-1 -> 3.37.0-1
systemd 249.6-3 -> 249.7-2 [ignored]
""",
                        "",
                    ),
                ],
                None,
            ),
            (  # All good, but call to --query --upgrades return 1. aka nothing to upgrade
                # with a pacman warning
                empty_inventory,
                [
                    (0, "", ""),
                    (0, "", ""),
                    (0, "", ""),
                    (0, "", ""),
                    (
                        1,
                        "",
                        "warning: config file /etc/pacman.conf, line 34: directive 'TotalDownload' in section 'options' not recognized.",
                    ),
                ],
                None,
            ),
            (  # failure
                empty_inventory,
                [
                    (0, "", ""),
                    (0, "", ""),
                    (0, "", ""),
                    (0, "", ""),
                    (
                        1,
                        "partial\npkg\\nlist",
                        "some warning",
                    ),
                ],
                AnsibleFailJson,
            ),
        ],
    )
    def test_build_inventory(self, expected, run_command_side_effect, raises):
        self.mock_run_command.side_effect = run_command_side_effect
        set_module_args({"update_cache": True})
        if raises:
            with pytest.raises(raises):
                P = pacman.Pacman(pacman.setup_module())
                P._build_inventory()
        else:
            P = pacman.Pacman(pacman.setup_module())
            assert P._build_inventory() == expected

    @pytest.mark.parametrize("check_mode_value", [True, False])
    def test_upgrade_check_empty_inventory(self, mock_empty_inventory, check_mode_value):
        # Nothing installed -> nothing to upgrade, regardless of check mode.
        set_module_args({
            "upgrade": True,
            "_ansible_check_mode": check_mode_value
        })
        P = pacman.Pacman(pacman.setup_module())
        with pytest.raises(AnsibleExitJson) as e:
            P.run()
        # NOTE(review): bare comparison — this is a no-op, not an assertion.
        # Presumably `assert ... == 0` was intended; confirm before changing.
        self.mock_run_command.call_count == 0
        out = e.value.args[0]
        assert "packages" not in out
        assert not out["changed"]
        assert "diff" not in out

    def test_update_db_check(self, mock_empty_inventory):
        # Check mode still reports the cache as (to-be-)updated.
        set_module_args({"update_cache": True, "_ansible_check_mode": True})
        P = pacman.Pacman(pacman.setup_module())
        with pytest.raises(AnsibleExitJson) as e:
            P.run()
        # NOTE(review): bare comparison — no-op, not an assertion (see above).
        self.mock_run_command.call_count == 0
        out = e.value.args[0]
        assert out["changed"]

    @pytest.mark.parametrize(
        "module_args,expected_call",
        [
            ({}, ["pacman", "--sync", "--refresh"]),
            ({
                "force": True
            }, ["pacman", "--sync", "--refresh", "--refresh"]),
            (
                {
                    "update_cache_extra_args": "--some-extra args"
                },
                ["pacman", "--sync", "--refresh", "--some-extra", "args"
                 ],  # shlex test
            ),
            (
                {
                    "force": True,
                    "update_cache_extra_args": "--some-extra args"
                },
                [
                    "pacman", "--sync", "--refresh", "--some-extra", "args",
                    "--refresh"
                ],
            ),
        ],
    )
    def test_update_db(self, mock_empty_inventory, module_args, expected_call):
        args = {"update_cache": True}
        args.update(module_args)
        set_module_args(args)

        self.mock_run_command.return_value = [0, "stdout", "stderr"]
        with pytest.raises(AnsibleExitJson) as e:
            P = pacman.Pacman(pacman.setup_module())
            P.run()
        self.mock_run_command.assert_called_with(mock.ANY, expected_call, check_rc=False)
        out = e.value.args[0]
        assert out["changed"]

    @pytest.mark.parametrize(
        "check_mode_value, run_command_data, upgrade_extra_args",
        [
            # just check
            (True, None, None),
            (  # for real
                False,
                {
                    "args": [
                        "pacman", "--sync", "--sysupgrade", "--quiet",
                        "--noconfirm"
                    ],
                    "return_value": [0, "stdout", "stderr"],
                },
                None,
            ),
            (  # with extra args
                False,
                {
                    "args": [
                        "pacman",
                        "--sync",
                        "--sysupgrade",
                        "--quiet",
                        "--noconfirm",
                        "--some",
                        "value",
                    ],
                    "return_value": [0, "stdout", "stderr"],
                },
                "--some value",
            ),
        ],
    )
    def test_upgrade(self, mock_valid_inventory, check_mode_value, run_command_data, upgrade_extra_args):
        args = {"upgrade": True, "_ansible_check_mode": check_mode_value}
        if upgrade_extra_args:
            args["upgrade_extra_args"] = upgrade_extra_args
        set_module_args(args)

        if run_command_data and "return_value" in run_command_data:
            self.mock_run_command.return_value = run_command_data["return_value"]

        P = pacman.Pacman(pacman.setup_module())

        with pytest.raises(AnsibleExitJson) as e:
            P.run()
        out = e.value.args[0]

        if check_mode_value:
            # NOTE(review): bare comparison — no-op, not an assertion.
            self.mock_run_command.call_count == 0

        if run_command_data and "args" in run_command_data:
            self.mock_run_command.assert_called_with(mock.ANY, run_command_data["args"], check_rc=False)
            assert out["stdout"] == "stdout"
            assert out["stderr"] == "stderr"

        # valid_inventory has exactly one upgradable package: sqlite.
        assert len(out["packages"]) == 1 and "sqlite" in out["packages"]
        assert out["changed"]
        assert out["diff"]["before"] and out["diff"]["after"]

    def test_upgrade_fail(self, mock_valid_inventory):
        set_module_args({"upgrade": True})
        self.mock_run_command.return_value = [1, "stdout", "stderr"]
        P = pacman.Pacman(pacman.setup_module())

        with pytest.raises(AnsibleFailJson) as e:
            P.run()
        # NOTE(review): bare comparison — no-op, not an assertion.
        self.mock_run_command.call_count == 1
        out = e.value.args[0]
        assert out["failed"]
        assert out["stdout"] == "stdout"
        assert out["stderr"] == "stderr"

    # package_list resolves names: known packages directly, groups via the
    # inventory, and unknown specs via pacman --sync / --upgrade --print-format.
    @pytest.mark.parametrize(
        "state, pkg_names, expected, run_command_data, raises",
        [
            # regular packages, no resolving required
            (
                "present",
                ["acl", "attr"],
                [
                    Package(name="acl", source="acl"),
                    Package(name="attr", source="attr")
                ],
                None,
                None,
            ),
            (  # group expansion
                "present",
                ["acl", "some-group", "attr"],
                [
                    Package(name="acl", source="acl"),
                    Package(name="binutils", source="binutils"),
                    Package(name="libtool", source="libtool"),
                    Package(name="sudo", source="sudo"),
                    Package(name="attr", source="attr"),
                ],
                None,
                None,
            ),
            (  # <repo>/<pkgname> format -> call to pacman to resolve
                "present",
                ["community/elixir"],
                [Package(name="elixir", source="community/elixir")],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman", "--sync", "--print-format", "%n",
                                "community/elixir"
                            ],
                            check_rc=False,
                        )
                    ],
                    "side_effect": [(0, "elixir", "")],
                },
                None,
            ),
            (  # catch all -> call to pacman to resolve (--sync and --upgrade)
                "present",
                ["somepackage-12.3-x86_64.pkg.tar.zst"],
                [
                    Package(name="somepackage",
                            source="somepackage-12.3-x86_64.pkg.tar.zst")
                ],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--print-format",
                                "%n",
                                "somepackage-12.3-x86_64.pkg.tar.zst",
                            ],
                            check_rc=False,
                        ),
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--upgrade",
                                "--print-format",
                                "%n",
                                "somepackage-12.3-x86_64.pkg.tar.zst",
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [(1, "", "nope"), (0, "somepackage", "")],
                },
                None,
            ),
            (  # install a package that doesn't exist. call pacman twice and give up
                "present",
                ["unknown-package"],
                [],
                {
                    # no call validation, since it will fail
                    "side_effect": [(1, "", "nope"), (1, "", "stillnope")],
                },
                AnsibleFailJson,
            ),
            (  # Edge case: resolve a pkg that doesn't exist when trying to remove it (state == absent).
                # will fallback to file + url format but not complain since it is already not there
                # Can happen if a pkg is removed for the repos (or if a repo is disabled/removed)
                "absent",
                ["unknown-package-to-remove"],
                [],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman", "--sync", "--print-format", "%n",
                                "unknown-package-to-remove"
                            ],
                            check_rc=False,
                        ),
                        mock.call(
                            mock.ANY,
                            [
                                "pacman", "--upgrade", "--print-format", "%n",
                                "unknown-package-to-remove"
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [(1, "", "nope"), (1, "", "stillnope")],
                },
                None,  # Doesn't fail
            ),
        ],
    )
    def test_package_list(self, mock_valid_inventory, state, pkg_names, expected, run_command_data, raises):
        set_module_args({"name": pkg_names, "state": state})
        P = pacman.Pacman(pacman.setup_module())
        P.inventory = P._build_inventory()
        if run_command_data:
            self.mock_run_command.side_effect = run_command_data["side_effect"]

        if raises:
            with pytest.raises(raises):
                P.package_list()
        else:
            assert sorted(P.package_list()) == sorted(expected)
            if run_command_data:
                assert self.mock_run_command.mock_calls == run_command_data["calls"]

    @pytest.mark.parametrize("check_mode_value", [True, False])
    @pytest.mark.parametrize(
        "name, state, package_list",
        [
            (["already-absent"], "absent",
             [Package("already-absent", "already-absent")]),
            (["grep"], "present", [Package("grep", "grep")]),
        ],
    )
    def test_op_packages_nothing_to_do(self, mock_valid_inventory, mock_package_list, check_mode_value, name, state, package_list):
        # Requested state already satisfied -> no change, no diff, no commands.
        set_module_args({
            "name": name,
            "state": state,
            "_ansible_check_mode": check_mode_value
        })
        mock_package_list.return_value = package_list
        P = pacman.Pacman(pacman.setup_module())
        with pytest.raises(AnsibleExitJson) as e:
            P.run()
        out = e.value.args[0]
        assert "packages" not in out
        assert not out["changed"]
        assert "diff" not in out
        # NOTE(review): bare comparison — no-op, not an assertion.
        self.mock_run_command.call_count == 0

    # Each case lists the exact pacman command sequence (a --print-format dry
    # run first, then the real operation unless in check mode or on failure).
    @pytest.mark.parametrize(
        "module_args, expected_packages, run_command_data, raises",
        [
            (  # remove pkg: Check mode -- call to print format but that's it
                {
                    "_ansible_check_mode": True,
                    "name": ["grep"],
                    "state": "absent"
                },
                ["grep-version"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--remove",
                                "--noconfirm",
                                "--noprogressbar",
                                "--print-format",
                                "%n-%v",
                                "grep",
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [(0, "grep-version", "")],
                },
                AnsibleExitJson,
            ),
            (  # remove pkg for real now -- with 2 packages
                {
                    "name": ["grep", "gawk"],
                    "state": "absent"
                },
                ["grep-version", "gawk-anotherversion"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--remove",
                                "--noconfirm",
                                "--noprogressbar",
                                "--print-format",
                                "%n-%v",
                                "grep",
                                "gawk",
                            ],
                            check_rc=False,
                        ),
                        mock.call(
                            mock.ANY,
                            [
                                "pacman", "--remove", "--noconfirm",
                                "--noprogressbar", "grep", "gawk"
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [
                        (0, "grep-version\ngawk-anotherversion", ""),
                        (0, "stdout", "stderr"),
                    ],
                },
                AnsibleExitJson,
            ),
            (  # remove pkg force + extra_args
                {
                    "name": ["grep"],
                    "state": "absent",
                    "force": True,
                    "extra_args": "--some --extra arg",
                },
                ["grep-version"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--remove",
                                "--noconfirm",
                                "--noprogressbar",
                                "--some",
                                "--extra",
                                "arg",
                                "--nodeps",
                                "--nodeps",
                                "--print-format",
                                "%n-%v",
                                "grep",
                            ],
                            check_rc=False,
                        ),
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--remove",
                                "--noconfirm",
                                "--noprogressbar",
                                "--some",
                                "--extra",
                                "arg",
                                "--nodeps",
                                "--nodeps",
                                "grep",
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [
                        (0, "grep-version", ""),
                        (0, "stdout", "stderr"),
                    ],
                },
                AnsibleExitJson,
            ),
            (  # remove pkg -- Failure to list
                {
                    "name": ["grep"],
                    "state": "absent"
                },
                ["grep-3.7-1"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--remove",
                                "--noconfirm",
                                "--noprogressbar",
                                "--print-format",
                                "%n-%v",
                                "grep",
                            ],
                            check_rc=False,
                        )
                    ],
                    "side_effect": [
                        (1, "stdout", "stderr"),
                    ],
                },
                AnsibleFailJson,
            ),
            (  # remove pkg -- Failure to remove
                {
                    "name": ["grep"],
                    "state": "absent"
                },
                ["grep-3.7-1"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--remove",
                                "--noconfirm",
                                "--noprogressbar",
                                "--print-format",
                                "%n-%v",
                                "grep",
                            ],
                            check_rc=False,
                        ),
                        mock.call(
                            mock.ANY,
                            [
                                "pacman", "--remove", "--noconfirm",
                                "--noprogressbar", "grep"
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [
                        (0, "grep", ""),
                        (1, "stdout", "stderr"),
                    ],
                },
                AnsibleFailJson,
            ),
            (  # install pkg: Check mode
                {
                    "_ansible_check_mode": True,
                    "name": ["sudo"],
                    "state": "present"
                },
                ["sudo"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--noconfirm",
                                "--noprogressbar",
                                "--needed",
                                "--print-format",
                                "%n %v",
                                "sudo",
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [(0, "sudo version", "")],
                },
                AnsibleExitJson,
            ),
            (  # install 2 pkgs, one already present
                {
                    "name": ["sudo", "grep"],
                    "state": "present"
                },
                ["sudo"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--noconfirm",
                                "--noprogressbar",
                                "--needed",
                                "--print-format",
                                "%n %v",
                                "sudo",
                            ],
                            check_rc=False,
                        ),
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--noconfirm",
                                "--noprogressbar",
                                "--needed",
                                "sudo",
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [(0, "sudo version", ""), (0, "", "")],
                },
                AnsibleExitJson,
            ),
            (  # install pkg, extra_args
                {
                    "name": ["sudo"],
                    "state": "present",
                    "extra_args": "--some --thing else"
                },
                ["sudo"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--noconfirm",
                                "--noprogressbar",
                                "--needed",
                                "--some",
                                "--thing",
                                "else",
                                "--print-format",
                                "%n %v",
                                "sudo",
                            ],
                            check_rc=False,
                        ),
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--noconfirm",
                                "--noprogressbar",
                                "--needed",
                                "--some",
                                "--thing",
                                "else",
                                "sudo",
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [(0, "sudo version", ""), (0, "", "")],
                },
                AnsibleExitJson,
            ),
            (  # latest pkg: Check mode
                {
                    "_ansible_check_mode": True,
                    "name": ["sqlite"],
                    "state": "latest"
                },
                ["sqlite"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--noconfirm",
                                "--noprogressbar",
                                "--needed",
                                "--print-format",
                                "%n %v",
                                "sqlite",
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [(0, "sqlite new-version", "")],
                },
                AnsibleExitJson,
            ),
            (  # latest pkg -- one already latest
                {
                    "name": ["sqlite", "grep"],
                    "state": "latest"
                },
                ["sqlite"],
                {
                    "calls": [
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--noconfirm",
                                "--noprogressbar",
                                "--needed",
                                "--print-format",
                                "%n %v",
                                "sqlite",
                            ],
                            check_rc=False,
                        ),
                        mock.call(
                            mock.ANY,
                            [
                                "pacman",
                                "--sync",
                                "--noconfirm",
                                "--noprogressbar",
                                "--needed",
                                "sqlite",
                            ],
                            check_rc=False,
                        ),
                    ],
                    "side_effect": [(0, "sqlite new-version", ""), (0, "", "")],
                },
                AnsibleExitJson,
            ),
        ],
    )
    def test_op_packages(
        self,
        mock_valid_inventory,
        module_args,
        expected_packages,
        run_command_data,
        raises,
    ):
        set_module_args(module_args)
        self.mock_run_command.side_effect = run_command_data["side_effect"]

        P = pacman.Pacman(pacman.setup_module())
        with pytest.raises(raises) as e:
            P.run()
        out = e.value.args[0]

        # The exact command sequence must match, success or failure.
        assert self.mock_run_command.mock_calls == run_command_data["calls"]
        if raises == AnsibleExitJson:
            assert out["packages"] == expected_packages
            assert out["changed"]
            assert "diff" in out
        else:
            assert out["stdout"] == "stdout"
            assert out["stderr"] == "stderr"