def add_pritunl_organization(module):
    """Ensure a Pritunl organization exists, creating it when missing.

    Exits the module with ``changed`` and the organization ``response``.
    """
    org_name = module.params.get("name")

    matching_orgs = list_pritunl_organizations(**dict_merge(
        get_pritunl_settings(module),
        {"filters": {"name": org_name}},
    ))

    result = {}
    if matching_orgs:
        # Organization already present: idempotent no-op.
        result["changed"] = False
        result["response"] = matching_orgs[0]
    else:
        # Missing: create it through the Pritunl API.
        created = post_pritunl_organization(**dict_merge(
            get_pritunl_settings(module),
            {"organization_name": org_name},
        ))
        result["changed"] = True
        result["response"] = created

    module.exit_json(**result)
def remove_pritunl_user(module):
    """Remove a Pritunl user from an organization.

    Fails when the organization does not exist. Exits with ``changed=False``
    and an empty response when the user is already absent.
    """
    result = {}
    org_name = module.params.get("organization")
    user_name = module.params.get("user_name")

    # Resolve the organization by name (removed a redundant `org_obj_list = []`
    # initialization that was immediately overwritten).
    org_obj_list = list_pritunl_organizations(**dict_merge(
        get_pritunl_settings(module),
        {"filters": {"name": org_name}},
    ))

    if len(org_obj_list) == 0:
        module.fail_json(
            msg="Can not remove user '%s' from a non existing organization '%s'"
            % (user_name, org_name))

    org_id = org_obj_list[0]["id"]

    # Grab existing users from this org matching the requested name.
    users = list_pritunl_users(**dict_merge(
        get_pritunl_settings(module),
        {
            "organization_id": org_id,
            "filters": {"name": user_name},
        },
    ))

    # If the user does not exist, do nothing (idempotent removal).
    if len(users) == 0:
        result["changed"] = False
        result["response"] = {}
    # Otherwise remove the user from Pritunl.
    else:
        response = delete_pritunl_user(**dict_merge(
            get_pritunl_settings(module),
            {
                "organization_id": org_id,
                "user_id": users[0]["id"],
            },
        ))
        result["changed"] = True
        result["response"] = response

    module.exit_json(**result)
def test_add_and_update_pritunl_user(
    self,
    pritunl_settings,
    pritunl_user_data,
    post_pritunl_user_mock,
    put_pritunl_user_mock,
    org_id,
):
    """Create a user then update it, verifying only updated fields change."""
    api._post_pritunl_user = post_pritunl_user_mock()
    api._put_pritunl_user = put_pritunl_user_mock()

    create_response = api.post_pritunl_user(
        **dict_merge(
            pritunl_settings,
            {
                "organization_id": org_id,
                "user_data": pritunl_user_data,
            },
        )
    )

    # Ensure provided settings match with the ones returned by Pritunl
    for k, v in iteritems(pritunl_user_data):
        assert create_response[k] == v

    # Update the newly created user to ensure only certain settings are changed
    user_updates = {
        "name": "bob",
        "email": "*****@*****.**",
        "disabled": True,
    }

    update_response = api.post_pritunl_user(
        **dict_merge(
            pritunl_settings,
            {
                "organization_id": org_id,
                "user_id": create_response["id"],
                "user_data": dict_merge(pritunl_user_data, user_updates),
            },
        )
    )

    # Ensure only certain settings changed and the rest remained untouched.
    # BUG FIX: the original checked `if k in update_response` while iterating
    # update_response itself — always true, so the else branch was unreachable
    # and the assertion `update_response[k] == v` was a tautology. Compare
    # against the keys that were actually updated instead.
    for k, v in iteritems(update_response):
        if k in user_updates:
            assert v == user_updates[k]
        else:
            assert v == create_response[k]
def apply_patch(actual, desired):
    """Compute a three-way patch between the last applied config, the desired
    definition, and the actual cluster state.

    Returns a ``(actual, patch)`` tuple; when no last-applied annotation is
    present the annotated desired definition is returned as the patch.
    """
    last_applied = actual['metadata'].get('annotations', {}).get(
        LAST_APPLIED_CONFIG_ANNOTATION)

    if not last_applied:
        # Nothing recorded from a previous apply: the annotated desired
        # definition becomes the patch wholesale.
        return actual, dict_merge(desired, annotate(desired))

    # ensure that last_applied doesn't come back as a dict of unicode
    # key/value pairs; json.loads can be used if we stop supporting python 2
    last_applied = json.loads(last_applied)
    patch = merge(
        dict_merge(last_applied, annotate(last_applied)),
        dict_merge(desired, annotate(desired)),
        actual,
    )
    if not patch:
        return actual, actual
    return actual, patch
def test_merge_sub_dicts(self):
    '''merge sub dicts '''
    left = {'a': {'a1': 1}}
    right = {'a': {'b1': 2}}
    # Nested dicts under the same key must be merged, not replaced.
    expected = {'a': {'a1': 1, 'b1': 2}}
    assert dict_merge(left, right) == expected
def test_merge_sub_dicts(self):
    '''merge sub dicts '''
    left = {'a': {'a1': 1}}
    right = {'a': {'b1': 2}}
    # Nested dicts under the same key must be merged, not replaced.
    expected = {'a': {'a1': 1, 'b1': 2}}
    self.assertEqual(dict_merge(left, right), expected)
def list_merge(last_applied, actual, desired, position):
    """Strategic-merge two lists keyed by their patch merge key.

    Falls back to returning ``desired`` unchanged when no merge key is known
    for this position or when nothing was previously applied.
    """
    if position not in STRATEGIC_MERGE_PATCH_KEYS or not last_applied:
        # Without a merge key (or prior state) the desired list wins wholesale.
        return desired

    merge_key = STRATEGIC_MERGE_PATCH_KEYS[position]
    by_key_last = list_to_dict(last_applied, merge_key, position)
    by_key_actual = list_to_dict(actual, merge_key, position)
    by_key_desired = list_to_dict(desired, merge_key, position)

    merged = []
    for key, desired_item in by_key_desired.items():
        if key in by_key_actual and key in by_key_last:
            # Present everywhere: three-way merge the item, then overlay the
            # patch onto the actual item.
            patch = merge(
                by_key_last[key],
                desired_item,
                by_key_actual[key],
                position,
            )
            merged.append(dict_merge(by_key_actual[key], patch))
        else:
            # New (or never-applied) item: take the desired version as-is.
            merged.append(desired_item)

    # Keep items that exist on the cluster but were never managed by us.
    for key, actual_item in by_key_actual.items():
        if key not in by_key_desired and key not in by_key_last:
            merged.append(actual_item)

    return merged
def _set_data_entry(self, container_name, key, value, path=None):
    """Helper to save data

    Helper to save the data in self.data.
    Detect if data is already in the branch and use dict_merge() to prevent
    that the branch is overwritten.

    Args:
        str(container_name): name of container
        str(key): same as dict
        *(value): same as dict
    Kwargs:
        str(path): path to branch-part
    Raises:
        AnsibleParserError
    Returns:
        None"""
    if not path:
        # Default target is the 'inventory' branch of the shared data store.
        path = self.data['inventory']
    if container_name not in path:
        path[container_name] = {}

    try:
        if isinstance(value, dict) and key in path[container_name]:
            # NOTE(review): this assigns the merge result to the whole
            # container entry, not to path[container_name][key] — it looks
            # like the sub-key may have been intended as the target. Confirm
            # against callers before changing; current behavior replaces the
            # container entry with dict_merge(value, existing-sub-branch).
            path[container_name] = dict_merge(value, path[container_name][key])
        else:
            path[container_name][key] = value
    except KeyError as err:
        raise AnsibleParserError(
            "Unable to store Informations: {0}".format(to_native(err)))
def remove_pritunl_organization(module):
    """Delete a Pritunl organization by name.

    Refuses to delete an organization that still has users unless the
    ``force`` option is set. Exits with ``changed=False`` when the
    organization does not exist.
    """
    result = {}
    org_name = module.params.get("name")
    force = module.params.get("force")

    org_obj_list = list_pritunl_organizations(
        **dict_merge(
            get_pritunl_settings(module),
            {
                "filters": {"name": org_name},
            },
        )
    )

    if len(org_obj_list) == 0:
        # Nothing to delete: idempotent no-op.
        result["changed"] = False
        result["response"] = {}
    else:
        org = org_obj_list[0]
        # Deletion is only allowed when forced or when no users are attached.
        if not force and org["user_count"] > 0:
            module.fail_json(
                msg=(
                    "Can not remove organization '%s' with %d attached users. "
                    "Either set 'force' option to true or remove active users "
                    "from the organization"
                )
                % (org_name, org["user_count"])
            )

        response = delete_pritunl_organization(
            **dict_merge(
                get_pritunl_settings(module),
                {"organization_id": org["id"]},
            )
        )
        result["changed"] = True
        result["response"] = response

    module.exit_json(**result)
def test_dict_merge(self):
    """Exercise dict_merge across scalars, bools, lists and nested dicts."""
    first = dict(obj2=dict(), b1=True, b2=False, b3=False,
                 one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
                 l1=[1, 3], l2=[1, 2, 3], l4=[4],
                 nested=dict(n1=dict(n2=2)))
    second = dict(b1=True, b2=False, b3=True, b4=True,
                  one=1, three=4, four=4, obj1=dict(key1=2),
                  l1=[2, 1], l2=[3, 2, 1], l3=[1],
                  nested=dict(n1=dict(n2=2, n3=3)))

    result = dict_merge(first, second)

    # scalar keys: second dict wins on collision, unique keys survive
    self.assertIn('one', result)
    self.assertIn('two', result)
    self.assertEqual(result['three'], 4)
    self.assertEqual(result['four'], 4)

    # nested dicts are merged key-by-key
    self.assertIn('obj1', result)
    self.assertIn('key1', result['obj1'])
    self.assertIn('key2', result['obj1'])
    self.assertEqual(result['obj1']['key1'], 2)

    # lists are replaced, not merged
    # (this differs from the network_utils/common variant of dict_merge,
    # which merges lists)
    self.assertEqual(result['l1'], [2, 1])
    self.assertIn('l2', result)
    self.assertEqual(result['l3'], [1])
    self.assertIn('l4', result)

    # deeper nesting also merges
    self.assertIn('obj1', result)

    # booleans: second dict wins
    self.assertIn('b1', result)
    self.assertIn('b2', result)
    self.assertTrue(result['b3'])
    self.assertTrue(result['b4'])
def test_list_all_pritunl_user(self, pritunl_settings, get_pritunl_user_mock,
                               org_id, org_user_count):
    """Listing without filters returns every user of the organization."""
    api._get_pritunl_users = get_pritunl_user_mock()

    listed = api.list_pritunl_users(
        **dict_merge(pritunl_settings, {"organization_id": org_id}))

    assert len(listed) == org_user_count
def apply_object(resource, definition):
    """Fetch the live object for *definition* and compute its apply patch.

    Returns ``(None, annotated_definition)`` when the object does not exist
    yet, otherwise delegates to apply_patch with the live state.
    """
    metadata = definition["metadata"]
    try:
        actual = resource.get(
            name=metadata["name"],
            namespace=metadata.get("namespace"),
        )
    except NotFoundError:
        # Not on the cluster yet: the annotated definition is the full patch.
        return None, dict_merge(definition, annotate(definition))
    return apply_patch(actual.to_dict(), definition)
def merge_dicts(base_dict, merging_dicts):
    """This function merges a base dictionary with one or more other dictionaries.
    The base dictionary takes precedence when there is a key collision.
    merging_dicts can be a dict or a list or tuple of dicts.  In the latter case,
    the dictionaries at the front of the list have higher precedence over the ones at the end.
    """
    if not merging_dicts:
        merging_dicts = ({},)
    if not isinstance(merging_dicts, Sequence):
        merging_dicts = (merging_dicts,)

    merged = {}
    # Merge lowest-precedence dicts first so higher-precedence ones
    # (earlier in the sequence) overwrite them.
    for other in reversed(merging_dicts):
        merged = dict_transformations.dict_merge(merged, other)
    # The base dict is merged last and therefore wins every collision.
    return dict_transformations.dict_merge(merged, base_dict)
def get_pritunl_user(module):
    """List Pritunl users of an organization, optionally filtered by name.

    Fails when the organization does not exist; otherwise exits with the
    (possibly empty) list of matching users and ``changed=False``.
    """
    user_name = module.params.get("user_name")
    user_type = module.params.get("user_type")
    org_name = module.params.get("organization")

    # Resolve the organization by name (removed a redundant `org_obj_list = []`
    # initialization that was immediately overwritten).
    org_obj_list = list_pritunl_organizations(**dict_merge(
        get_pritunl_settings(module),
        {"filters": {"name": org_name}}))

    if len(org_obj_list) == 0:
        module.fail_json(
            msg="Can not list users from the organization '%s' which does not exist"
            % org_name)

    org_id = org_obj_list[0]["id"]

    # Without a user name, filter on type only so every user is listed.
    users = list_pritunl_users(**dict_merge(
        get_pritunl_settings(module),
        {
            "organization_id": org_id,
            "filters": ({"type": user_type} if user_name is None else
                        {"name": user_name, "type": user_type}),
        },
    ))

    result = {}
    result["changed"] = False
    result["users"] = users

    module.exit_json(**result)
def test_delete_pritunl_user(self, pritunl_settings, org_id, user_id,
                             delete_pritunl_user_mock):
    """Deleting a user returns the API's empty payload."""
    api._delete_pritunl_user = delete_pritunl_user_mock()

    delete_args = dict_merge(
        pritunl_settings,
        {
            "organization_id": org_id,
            "user_id": user_id,
        },
    )
    assert api.delete_pritunl_user(**delete_args) == {}
def test_list_filtered_pritunl_organization(
    self,
    pritunl_settings,
    get_pritunl_organization_mock,
    org_filters,
    org_expected,
):
    """Filtering organizations returns exactly the expected match."""
    api._get_pritunl_organizations = get_pritunl_organization_mock()

    matches = api.list_pritunl_organizations(
        **dict_merge(pritunl_settings, {"filters": org_filters}))

    assert len(matches) == 1
    assert matches[0]["name"] == org_expected
def compare_arrays(old_params, new_params, param_name):
    """Compare two lists of named items, keyed by their 'name' entry.

    Returns True when merging the new items over the old ones (old entries
    winning on collision) changes nothing — i.e. the new list introduces no
    new names.
    """
    old_items = old_params.get(param_name) or []
    new_items = new_params.get(param_name) or []

    old_by_name = {entry['name']: entry for entry in old_items}
    new_by_name = {entry['name']: entry for entry in new_items}

    return dict_merge(old_by_name, new_by_name) == old_by_name
def test_present(self):
    """Test Pritunl organization creation."""
    org_params = {"name": "NewOrg"}
    set_module_args(
        dict_merge(
            {
                "pritunl_api_token": "token",
                "pritunl_api_secret": "secret",
                "pritunl_url": "https://pritunl.domain.com",
            },
            org_params,
        )
    )

    # First run: the organization does not exist yet and must be created.
    with self.patch_get_pritunl_organizations(
        side_effect=PritunlListOrganizationMock
    ) as mock_get:
        with self.patch_add_pritunl_organization(
            side_effect=PritunlPostOrganizationMock
        ) as mock_add:
            with self.assertRaises(AnsibleExitJson) as create_result:
                self.module.main()

    create_exc = create_result.exception.args[0]

    self.assertTrue(create_exc["changed"])
    self.assertEqual(create_exc["response"]["name"], org_params["name"])
    self.assertEqual(create_exc["response"]["user_count"], 0)

    # Second run: the organization now exists, module must be idempotent.
    with self.patch_get_pritunl_organizations(
        side_effect=PritunlListOrganizationAfterPostMock
    ) as mock_get:
        with self.patch_add_pritunl_organization(
            side_effect=PritunlPostOrganizationMock
        ) as mock_add:
            with self.assertRaises(AnsibleExitJson) as idempotent_result:
                self.module.main()

    idempotent_exc = idempotent_result.exception.args[0]

    # Both runs must return the same payload, apart from 'changed',
    # which should be False the second time.
    for key, value in iteritems(idempotent_exc):
        if key == "changed":
            self.assertFalse(value)
        else:
            self.assertEqual(create_exc[key], value)
def remove(manager, name, module):
    """Uninstall a package using the given package manager.

    Returns True when a removal was performed, False when the package was
    already absent or the manager is not one of apt/dnf/yum.
    """
    if manager == 'apt':
        with apt.Cache() as cache:
            if name not in cache:
                # Already absent — nothing to do.
                return False
            cmd = "apt-get remove -y '{name}'".format(name=name)
            module.run_command(cmd, check_rc=True,
                               environ_update=dict_merge(ENV_VARS, APT_ENV_VARS))
            return True

    if manager == 'dnf':
        with dnf.Base() as base:
            base.read_all_repos()
            base.fill_sack(load_system_repo=True, load_available_repos=False)
            if not base.sack.query().installed().filter(name=name).run():
                # Already absent — nothing to do.
                return False
            # Remove through the dnf API (with autoremove semantics via
            # clean_requirements_on_remove) instead of shelling out to
            # `dnf autoremove -y`.
            base.conf.clean_requirements_on_remove = True
            base.remove(name)
            base.resolve(allow_erasing=True)
            base.do_transaction()
            return True

    if manager == 'yum':
        yb = yum.YumBase()
        if not yb.rpmdb.searchNevra(name=name):
            # Already absent — nothing to do.
            return False
        cmd = "yum autoremove -y '{name}'".format(name=name)
        module.run_command(cmd, check_rc=True, environ_update=ENV_VARS)
        return True

    # Unknown package manager.
    return False
def output(self):
    """Assemble the module result dict from the tracked variables.

    Adds ansible_facts (when a facts name is configured), merges variable
    diffs into the result diff in diff mode, and renames keys that collide
    with reserved output names by prefixing them with '_'.
    """
    result = dict(self.vars.output())
    if self.facts_name:
        facts = self.vars.facts()
        if facts is not None:
            result['ansible_facts'] = {self.facts_name: facts}
    if self.diff_mode:
        diff = result.get('diff', {})
        vars_diff = self.vars.diff() or {}
        result['diff'] = dict_merge(dict(diff), vars_diff)

    # BUG FIX: iterate over a snapshot of the keys. The original iterated
    # `result` directly while adding ("_" + varname) and deleting entries,
    # which raises "RuntimeError: dictionary keys changed during iteration"
    # on Python 3.8+ whenever a conflicting key is present.
    for varname in list(result):
        if varname in self._output_conflict_list:
            result["_" + varname] = result[varname]
            del result[varname]
    return result
def to_nested_dict(vm_properties):
    """
    Parse properties from dot notation to dict
    """
    nested = {}
    for dotted_name, raw_value in vm_properties.items():
        # Build the branch inside-out: wrap the parsed value in one dict
        # per path component, innermost component first.
        branch = parse_vim_property(raw_value)
        for part in reversed(dotted_name.split(".")):
            branch = {part: branch}
        nested = dict_merge(nested, branch)
    return nested
def make_deb(architecture, conflicts, cwd, depends, description, enhances,
             maintainer, manager, name, recommends, suggests, summary,
             version, module):
    """Build a Debian package from the control-file template.

    Renders DEBIAN/control, invokes dpkg-deb on the package directory and
    returns the path of the resulting .deb file.
    """
    # Lay out <cwd>/<name>/DEBIAN/control as dpkg-deb expects.
    debian_path = os.path.join(cwd, name, 'DEBIAN')
    os.makedirs(debian_path, mode=0o755)
    control_path = os.path.join(debian_path, 'control')

    rendered_control = jinja2.Template(DEBIAN_CONTROLFILE_TEMPLATE).render(
        architecture=architecture,
        conflicts=conflicts,
        depends=depends,
        description=description,
        enhances=enhances,
        maintainer=maintainer,
        name=name,
        recommends=recommends,
        suggests=suggests,
        summary=summary,
        version=version)
    with open(control_path, 'w') as f:
        f.write(rendered_control)

    # Ref.: man dpkg-deb
    cmd = "dpkg-deb --build '{binary_directory}'".format(binary_directory=name)
    module.run_command(cmd, check_rc=True, cwd=cwd,
                       environ_update=dict_merge(ENV_VARS, APT_ENV_VARS))

    pkg_filename = '{name}.deb'.format(name=name)
    module.debug('package filename: %s' % pkg_filename)
    return os.path.join(cwd, pkg_filename)
def test_absent(self):
    """Test organization removal from Pritunl."""
    org_params = {"name": "NewOrg"}
    set_module_args(
        dict_merge(
            {
                "state": "absent",
                "pritunl_api_token": "token",
                "pritunl_api_secret": "secret",
                "pritunl_url": "https://pritunl.domain.com",
            },
            org_params,
        )
    )

    # First run: the organization exists and gets deleted.
    with self.patch_get_pritunl_organizations(
        side_effect=PritunlListOrganizationAfterPostMock
    ) as mock_get:
        with self.patch_delete_pritunl_organization(
            side_effect=PritunlDeleteOrganizationMock
        ) as mock_delete:
            with self.assertRaises(AnsibleExitJson) as delete_result:
                self.module.main()

    delete_exc = delete_result.exception.args[0]

    self.assertTrue(delete_exc["changed"])
    self.assertEqual(delete_exc["response"], {})

    # Second run: the organization is gone, module must be idempotent.
    with self.patch_get_pritunl_organizations(
        side_effect=PritunlListOrganizationMock
    ) as mock_get:
        with self.patch_delete_pritunl_organization(
            side_effect=PritunlDeleteOrganizationMock
        ) as mock_add:
            with self.assertRaises(AnsibleExitJson) as idempotent_result:
                self.module.main()

    idempotent_exc = idempotent_result.exception.args[0]

    # Same returned payload, but 'changed' should be False the second time.
    self.assertFalse(idempotent_exc["changed"])
    self.assertEqual(idempotent_exc["response"], delete_exc["response"])
def test_add_pritunl_organization(
    self,
    pritunl_settings,
    pritunl_organization_data,
    post_pritunl_organization_mock,
):
    """Creating an organization echoes back the provided settings."""
    api._post_pritunl_organization = post_pritunl_organization_mock()

    created = api.post_pritunl_organization(
        **dict_merge(
            pritunl_settings,
            {"organization_name": pritunl_organization_data["name"]},
        )
    )

    # Every field we sent must come back unchanged from Pritunl.
    for field, expected in iteritems(pritunl_organization_data):
        assert created[field] == expected
def get_container_data(self, names): """Create Inventory of the container Iterate through the different branches of the containers and collect Informations. Args: list(names): List of container names Kwargs: None Raises: None Returns: None""" # tuple(('instances','metadata/templates')) to get section in branch # e.g. /1.0/instances/<name>/metadata/templates branches = ['containers', ('instances', 'state')] container_config = {} for branch in branches: for name in names: container_config['containers'] = self._get_config(branch, name) self.data = dict_merge(container_config, self.data)
def get_pritunl_organizations(module):
    """List Pritunl organizations, optionally filtered by name.

    Fails when a name is given but no organization matches it.
    """
    org_name = module.params.get("organization")

    # With no name given, pass filters=None to list every organization.
    organizations = list_pritunl_organizations(**dict_merge(
        get_pritunl_settings(module),
        {"filters": {"name": org_name} if org_name else None},
    ))

    if org_name and len(organizations) == 0:
        # A specific organization was requested but does not exist.
        module.fail_json(msg="Organization '%s' does not exist" % org_name)

    result = {}
    result["changed"] = False
    result["organizations"] = organizations

    module.exit_json(**result)
def test_list_filtered_pritunl_user(
    self,
    pritunl_settings,
    get_pritunl_user_mock,
    org_id,
    user_filters,
    user_expected,
):
    """Filtered user listing only returns matching users of the org."""
    api._get_pritunl_users = get_pritunl_user_mock()

    matches = api.list_pritunl_users(
        **dict_merge(pritunl_settings, {
            "organization_id": org_id,
            "filters": user_filters
        }))

    assert len(matches) > 0
    for user in matches:
        assert user["organization"] == org_id
        assert user["name"] == user_expected
def test_absent_with_existing_users(self):
    """Test organization removal with attached users should fail except if force is true."""
    module_args = {
        "state": "absent",
        "pritunl_api_token": "token",
        "pritunl_api_secret": "secret",
        "pritunl_url": "https://pritunl.domain.com",
        "name": "GumGum",
    }
    set_module_args(module_args)

    # Without force, removal of an organization with users must fail.
    with self.patch_get_pritunl_organizations(
        side_effect=PritunlListOrganizationMock
    ) as mock_get:
        with self.patch_delete_pritunl_organization(
            side_effect=PritunlDeleteOrganizationMock
        ) as mock_delete:
            with self.assertRaises(AnsibleFailJson) as failure_result:
                self.module.main()

    failure_exc = failure_result.exception.args[0]
    self.assertRegex(failure_exc["msg"], "Can not remove organization")

    # With force=True the same removal must succeed.
    set_module_args(dict_merge(module_args, {"force": True}))

    with self.patch_get_pritunl_organizations(
        side_effect=PritunlListOrganizationMock
    ) as mock_get:
        with self.patch_delete_pritunl_organization(
            side_effect=PritunlDeleteOrganizationMock
        ) as mock_delete:
            with self.assertRaises(AnsibleExitJson) as delete_result:
                self.module.main()

    delete_exc = delete_result.exception.args[0]
    self.assertTrue(delete_exc["changed"])
def get_network_data(self, names): """Create Inventory of the instance Iterate through the different branches of the instances and collect Informations. Args: list(names): List of instance names Kwargs: None Raises: None Returns: None""" # tuple(('instances','metadata/templates')) to get section in branch # e.g. /1.0/instances/<name>/metadata/templates branches = [('networks', 'state')] network_config = {} for branch in branches: for name in names: try: network_config['networks'] = self._get_config(branch, name) except LXDClientException: network_config['networks'] = {name: None} self.data = dict_merge(network_config, self.data)
def perform_action(self, resource, definition):
    """Reconcile one Kubernetes resource against its desired definition.

    Dispatches on module state/options to delete, apply, create, replace
    (force) or patch the object, optionally waiting for the result, and
    returns the module result dict (changed, result, method, diff, ...).
    """
    delete_options = self.params.get('delete_options')
    result = {'changed': False, 'result': {}}
    state = self.params.get('state', None)
    force = self.params.get('force', False)
    name = definition['metadata'].get('name')
    namespace = definition['metadata'].get('namespace')
    existing = None
    wait = self.params.get('wait')
    wait_sleep = self.params.get('wait_sleep')
    wait_timeout = self.params.get('wait_timeout')
    wait_condition = None
    # Only honour wait_condition when it actually names a condition type.
    if self.params.get('wait_condition'
                       ) and self.params['wait_condition'].get('type'):
        wait_condition = self.params['wait_condition']

    self.remove_aliases()

    try:
        # ignore append_hash for resources other than ConfigMap and Secret
        if self.append_hash and definition['kind'] in [
                'ConfigMap', 'Secret'
        ]:
            name = '%s-%s' % (name, generate_hash(definition))
            definition['metadata']['name'] = name
        params = dict(name=name)
        if namespace:
            params['namespace'] = namespace
        # Look up the current object; a miss leaves existing = None.
        existing = resource.get(**params)
    except (NotFoundError, MethodNotAllowedError):
        # Remove traceback so that it doesn't show up in later failures
        try:
            sys.exc_clear()
        except AttributeError:
            # no sys.exc_clear on python3
            pass
    except ForbiddenError as exc:
        # Unprivileged users may still be able to create ProjectRequests.
        if definition['kind'] in ['Project', 'ProjectRequest'
                                  ] and state != 'absent':
            return self.create_project_request(definition)
        self.fail_json(
            msg='Failed to retrieve requested object: {0}'.format(
                exc.body),
            error=exc.status,
            status=exc.status,
            reason=exc.reason)
    except DynamicApiError as exc:
        self.fail_json(
            msg='Failed to retrieve requested object: {0}'.format(
                exc.body),
            error=exc.status,
            status=exc.status,
            reason=exc.reason)
    except ValueError as value_exc:
        self.fail_json(
            msg='Failed to retrieve requested object: {0}'.format(
                to_native(value_exc)),
            error='',
            status='',
            reason='')

    if state == 'absent':
        result['method'] = "delete"
        if not existing:
            # The object already does not exist
            return result
        else:
            # Delete the object
            result['changed'] = True
            if not self.check_mode:
                if delete_options:
                    # Wrap user-supplied delete options in a DeleteOptions body.
                    body = {
                        'apiVersion': 'v1',
                        'kind': 'DeleteOptions',
                    }
                    body.update(delete_options)
                    params['body'] = body
                try:
                    k8s_obj = resource.delete(**params)
                    result['result'] = k8s_obj.to_dict()
                except DynamicApiError as exc:
                    self.fail_json(
                        msg="Failed to delete object: {0}".format(
                            exc.body),
                        error=exc.status,
                        status=exc.status,
                        reason=exc.reason)
                if wait:
                    success, resource, duration = self.wait(
                        resource, definition, wait_sleep, wait_timeout,
                        'absent')
                    result['duration'] = duration
                    if not success:
                        self.fail_json(msg="Resource deletion timed out",
                                       **result)
            return result
    else:
        if self.apply:
            if self.check_mode:
                # Check mode: compute what apply would produce locally.
                ignored, patch = apply_object(
                    resource, _encode_stringdata(definition))
                if existing:
                    k8s_obj = dict_merge(existing.to_dict(), patch)
                else:
                    k8s_obj = patch
            else:
                try:
                    k8s_obj = resource.apply(
                        definition, namespace=namespace).to_dict()
                except DynamicApiError as exc:
                    msg = "Failed to apply object: {0}".format(exc.body)
                    if self.warnings:
                        msg += "\n" + "\n ".join(self.warnings)
                    self.fail_json(msg=msg,
                                   error=exc.status,
                                   status=exc.status,
                                   reason=exc.reason)
            success = True
            result['result'] = k8s_obj
            if wait and not self.check_mode:
                success, result['result'], result['duration'] = self.wait(
                    resource,
                    definition,
                    wait_sleep,
                    wait_timeout,
                    condition=wait_condition)
            if existing:
                existing = existing.to_dict()
            else:
                existing = {}
            # changed is derived from the diff against the prior state.
            match, diffs = self.diff_objects(existing, result['result'])
            result['changed'] = not match
            result['diff'] = diffs
            result['method'] = 'apply'
            if not success:
                self.fail_json(msg="Resource apply timed out", **result)
            return result

        if not existing:
            if self.check_mode:
                k8s_obj = _encode_stringdata(definition)
            else:
                try:
                    k8s_obj = resource.create(
                        definition, namespace=namespace).to_dict()
                except ConflictError:
                    # Some resources, like ProjectRequests, can't be created multiple times,
                    # because the resources that they create don't match their kind
                    # In this case we'll mark it as unchanged and warn the user
                    self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
if the resource you are creating does not directly create a resource of the same kind."
                              .format(name))
                    return result
                except DynamicApiError as exc:
                    msg = "Failed to create object: {0}".format(exc.body)
                    if self.warnings:
                        msg += "\n" + "\n ".join(self.warnings)
                    self.fail_json(msg=msg,
                                   error=exc.status,
                                   status=exc.status,
                                   reason=exc.reason)
                except Exception as exc:
                    msg = "Failed to create object: {0}".format(exc)
                    if self.warnings:
                        msg += "\n" + "\n ".join(self.warnings)
                    self.fail_json(msg=msg, error='', status='', reason='')
            success = True
            result['result'] = k8s_obj
            if wait and not self.check_mode:
                success, result['result'], result['duration'] = self.wait(
                    resource,
                    definition,
                    wait_sleep,
                    wait_timeout,
                    condition=wait_condition)
            result['changed'] = True
            result['method'] = 'create'
            if not success:
                self.fail_json(msg="Resource creation timed out", **result)
            return result

        match = False
        diffs = []

        if existing and force:
            # force=True replaces the whole object instead of patching it.
            if self.check_mode:
                k8s_obj = _encode_stringdata(definition)
            else:
                try:
                    k8s_obj = resource.replace(
                        definition,
                        name=name,
                        namespace=namespace,
                        append_hash=self.append_hash).to_dict()
                except DynamicApiError as exc:
                    msg = "Failed to replace object: {0}".format(exc.body)
                    if self.warnings:
                        msg += "\n" + "\n ".join(self.warnings)
                    self.fail_json(msg=msg,
                                   error=exc.status,
                                   status=exc.status,
                                   reason=exc.reason)
                match, diffs = self.diff_objects(existing.to_dict(),
                                                 k8s_obj)
            success = True
            result['result'] = k8s_obj
            if wait and not self.check_mode:
                success, result['result'], result['duration'] = self.wait(
                    resource,
                    definition,
                    wait_sleep,
                    wait_timeout,
                    condition=wait_condition)
                match, diffs = self.diff_objects(existing.to_dict(),
                                                 result['result'])
            result['changed'] = not match
            result['method'] = 'replace'
            result['diff'] = diffs
            if not success:
                self.fail_json(msg="Resource replacement timed out",
                               **result)
            return result

        # Differences exist between the existing obj and requested params
        if self.check_mode:
            k8s_obj = dict_merge(existing.to_dict(),
                                 _encode_stringdata(definition))
        else:
            if LooseVersion(
                    self.openshift_version) < LooseVersion("0.6.2"):
                # Older openshift clients don't accept merge_type.
                k8s_obj, error = self.patch_resource(
                    resource, definition, existing, name, namespace)
            else:
                # Try each configured merge type in order until one succeeds.
                for merge_type in self.params['merge_type'] or [
                        'strategic-merge', 'merge'
                ]:
                    k8s_obj, error = self.patch_resource(
                        resource,
                        definition,
                        existing,
                        name,
                        namespace,
                        merge_type=merge_type)
                    if not error:
                        break
            if error:
                self.fail_json(**error)

        success = True
        result['result'] = k8s_obj
        if wait and not self.check_mode:
            success, result['result'], result['duration'] = self.wait(
                resource,
                definition,
                wait_sleep,
                wait_timeout,
                condition=wait_condition)
        match, diffs = self.diff_objects(existing.to_dict(),
                                         result['result'])
        result['changed'] = not match
        result['method'] = 'patch'
        result['diff'] = diffs
        if not success:
            self.fail_json(msg="Resource update timed out", **result)
        return result