def test_keywords_not_in_PY2():
    """In Python 2 ("True", "False", "None") are not keywords.

    The isidentifier method ensures that those are treated as keywords
    on both Python 2 and 3.
    """
    assert not isidentifier("True")
    assert not isidentifier("False")
    assert not isidentifier("None")

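# The tests in this section pin down the behaviour of an `isidentifier` helper that
# rejects Python keywords, the Python 2 "non-keywords" True/False/None, and non-ASCII
# names on both Python 2 and 3. The function below is a minimal illustration of that
# contract, not the library's actual implementation; the name `isidentifier_sketch`
# and the regex are assumptions.
import re
from keyword import iskeyword

_IDENT_RE = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")


def isidentifier_sketch(ident):
    # ASCII-only identifier syntax, plus rejection of keywords and True/False/None.
    if not isinstance(ident, str) or not _IDENT_RE.match(ident):
        return False
    if iskeyword(ident) or ident in ("True", "False", "None"):
        return False
    return True


assert not isidentifier_sketch("True")       # keyword-like names rejected
assert not isidentifier_sketch("křížek")     # non-ASCII rejected
assert isidentifier_sketch("valid_name_1")   # ordinary identifiers accepted
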
def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    facts = dict()
    cacheable = boolean(self._task.args.pop('cacheable', False))

    if self._task.args:
        for (k, v) in iteritems(self._task.args):
            k = self._templar.template(k)

            if not isidentifier(k):
                result['failed'] = True
                result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                 "letters, numbers and underscores." % k)
                return result

            if not C.DEFAULT_JINJA2_NATIVE and isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
                v = boolean(v, strict=False)
            facts[k] = v

    result['changed'] = False
    result['ansible_facts'] = facts
    result['_ansible_facts_cacheable'] = cacheable
    return result

def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    facts = dict()
    cacheable = boolean(self._task.args.pop('cacheable', False))

    if self._task.args:
        for (k, v) in iteritems(self._task.args):
            k = self._templar.template(k)

            if not isidentifier(k):
                result['failed'] = True
                result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                 "letters, numbers and underscores." % k)
                return result

            if isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
                v = boolean(v, strict=False)
            facts[k] = v

    result['changed'] = False
    result['ansible_facts'] = facts
    result['_ansible_facts_cacheable'] = cacheable
    return result

def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)

    facts = dict()
    if self._task.args:
        for (k, v) in iteritems(self._task.args):
            k = self._templar.template(k)

            if not isidentifier(k):
                result['failed'] = True
                result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
                                 "and contain only letters, numbers and underscores." % k)
                return result

            if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
                v = boolean(v)
            facts[k] = v

    result['changed'] = False
    result['ansible_facts'] = facts
    return result

def run(self, tmp=None, task_vars=dict()):
    facts = dict()
    if self._task.args:
        for (k, v) in iteritems(self._task.args):
            k = self._templar.template(k)

            if not isidentifier(k):
                return dict(failed=True,
                            msg="The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
                                "and contain only letters, numbers and underscores." % k)

            if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
                v = boolean(v)
            facts[k] = v

    return dict(changed=False, ansible_facts=facts)

def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    stats = {'data': {}, 'per_host': False, 'aggregate': True}

    if self._task.args:
        data = self._task.args.get('data', {})

        if not isinstance(data, dict):
            data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)

        if not isinstance(data, dict):
            result['failed'] = True
            result['msg'] = "The 'data' option needs to be a dictionary/hash"
            return result

        # set boolean options, defaults are set above in stats init
        for opt in ['per_host', 'aggregate']:
            val = self._task.args.get(opt, None)
            if val is not None:
                if not isinstance(val, bool):
                    stats[opt] = boolean(self._templar.template(val), strict=False)
                else:
                    stats[opt] = val

        for (k, v) in iteritems(data):
            k = self._templar.template(k)

            if not isidentifier(k):
                result['failed'] = True
                result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                 "letters, numbers and underscores." % k)
                return result

            stats['data'][k] = self._templar.template(v)

    result['changed'] = False
    result['ansible_stats'] = stats
    return result

def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    stats = {'data': {}, 'per_host': False, 'aggregate': True}

    if self._task.args:
        data = self._task.args.get('data', {})

        if not isinstance(data, dict):
            data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)

        if not isinstance(data, dict):
            result['failed'] = True
            result['msg'] = "The 'data' option needs to be a dictionary/hash"
            return result

        # set boolean options, defaults are set above in stats init
        for opt in ['per_host', 'aggregate']:
            val = self._task.args.get(opt, None)
            if val is not None:
                if not isinstance(val, bool):
                    stats[opt] = boolean(self._templar.template(val), strict=False)
                else:
                    stats[opt] = val

        for (k, v) in iteritems(data):
            k = self._templar.template(k)

            if not isidentifier(k):
                result['failed'] = True
                result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                 "letters, numbers and underscores." % k)
                return result

            stats['data'][k] = self._templar.template(v)

    result['changed'] = False
    result['ansible_stats'] = stats
    return result

def run(self, tmp=None, task_vars=dict()):
    facts = dict()
    if self._task.args:
        for (k, v) in self._task.args.iteritems():
            k = self._templar.template(k)

            if not isidentifier(k):
                return dict(failed=True,
                            msg="The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
                                "and contain only letters, numbers and underscores." % k)

            if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
                v = boolean(v)
            facts[k] = v

    return dict(changed=False, ansible_facts=facts)

def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    facts = {}
    cacheable = boolean(self._task.args.pop('cacheable', False))

    if self._task.args:
        for (k, v) in iteritems(self._task.args):
            k = self._templar.template(k)

            if not isidentifier(k):
                raise AnsibleActionFail("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
                                        "and contain only letters, numbers and underscores." % k)

            # NOTE: this should really use BOOLEANS from convert_bool, but only in the k=v case,
            # right now it converts matching explicit YAML strings also when 'jinja2_native' is disabled.
            if not C.DEFAULT_JINJA2_NATIVE and isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
                v = boolean(v, strict=False)
            facts[k] = v
    else:
        raise AnsibleActionFail('No key/value pairs provided, at least one is required for this action to succeed')

    if facts:
        # just as _facts actions, we don't set changed=true as we are not modifying the actual host
        result['ansible_facts'] = facts
        result['_ansible_facts_cacheable'] = cacheable
    else:
        # this should not happen, but JIC we get here
        raise AnsibleActionFail('Unable to create any variables with provided arguments')

    return result

def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)

    facts = dict()
    if self._task.args:
        for (k, v) in iteritems(self._task.args):
            k = self._templar.template(k)

            if not isidentifier(k):
                result['failed'] = True
                result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, "
                                 "and contain only letters, numbers and underscores." % k)
                return result

            if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
                v = boolean(v)
            facts[k] = v

    result['changed'] = False
    result['ansible_facts'] = facts
    return result

def _validate_variable_keys(ds):
    for key in ds:
        if not isidentifier(key):
            raise TypeError("'%s' is not a valid variable name" % key)

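# Illustrative usage of _validate_variable_keys on a parsed vars mapping; the sample
# data here is hypothetical and only shows the pass/fail behaviour of the check.
parsed_vars = {"http_port": 8080, "enable_tls": True}
_validate_variable_keys(parsed_vars)  # valid names: passes silently

try:
    _validate_variable_keys({"not-a-var": 1})  # dashes are not valid in variable names
except TypeError as exc:
    print(exc)  # "'not-a-var' is not a valid variable name"
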
def run(self, tmp=None, task_vars=None):
    suffix_to_merge = self._task.args.get('suffix_to_merge', '')
    merged_var_name = self._task.args.get('merged_var_name', '')
    dedup = self._task.args.get('dedup', True)
    expected_type = self._task.args.get('expected_type')
    cacheable = bool(self._task.args.get('cacheable', False))
    recursive_dict_merge = bool(self._task.args.get('recursive_dict_merge', False))
    all_keys = task_vars.keys()

    # Validate args
    if expected_type not in ['dict', 'list']:
        raise AnsibleError("expected_type must be set ('dict' or 'list').")
    if not merged_var_name:
        raise AnsibleError("merged_var_name must be set")
    if not isidentifier(merged_var_name):
        raise AnsibleError("merged_var_name '%s' is not a valid identifier" % merged_var_name)
    if not suffix_to_merge.endswith('__to_merge'):
        raise AnsibleError("Merge suffix must end with '__to_merge', sorry!")
    if merged_var_name in all_keys:
        warning = "{} is already defined, are you sure you want to overwrite it?"
        display.warning(warning.format(merged_var_name))
        display.v("The contents of {} are: {}".format(merged_var_name, task_vars[merged_var_name]))

    keys = sorted([key for key in task_vars.keys() if key.endswith(suffix_to_merge)])
    display.v("Merging vars in this order: {}".format(keys))

    # We need to render any jinja in the merged var now, because once it
    # leaves this plugin, ansible will cleanse it by turning any jinja tags
    # into comments.
    # And we need it done before merging the variables,
    # in case any structured data is specified with templates.
    merge_vals = [self._templar.template(task_vars[key]) for key in keys]

    # Dispatch based on type that we're merging
    if merge_vals == []:
        if expected_type == 'list':
            merged = []
        else:
            merged = {}  # pylint: disable=redefined-variable-type
    elif isinstance(merge_vals[0], list):
        merged = merge_list(merge_vals, dedup)
    elif isinstance(merge_vals[0], dict):
        merged = merge_dict(merge_vals, dedup, recursive_dict_merge)
    else:
        raise AnsibleError("Don't know how to merge variables of type: {}".format(type(merge_vals[0])))

    return {
        'ansible_facts': {merged_var_name: merged},
        'ansible_facts_cacheable': cacheable,
        'changed': False,
    }

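# The merge_vars variants in this section call merge_list and merge_dict helpers that
# are not shown here. The sketch below illustrates plausible semantics inferred only
# from how they are called (concatenate and optionally dedup lists, last-wins dict
# merge with optional recursion); it is not the plugin's actual code.
def merge_list(merge_vals, dedup):
    # Concatenate all lists, optionally dropping duplicates while keeping order.
    merged = []
    for val in merge_vals:
        merged.extend(val)
    if dedup:
        seen = set()
        deduped = []
        for item in merged:
            marker = repr(item)  # repr() so unhashable items (dicts/lists) can be compared
            if marker not in seen:
                seen.add(marker)
                deduped.append(item)
        merged = deduped
    return merged


def merge_dict(merge_vals, dedup, recursive):
    # Later dicts win; optionally recurse into nested dicts and merge nested lists.
    merged = {}
    for val in merge_vals:
        for k, v in val.items():
            if recursive and isinstance(v, dict) and isinstance(merged.get(k), dict):
                merged[k] = merge_dict([merged[k], v], dedup, recursive)
            elif isinstance(v, list) and isinstance(merged.get(k), list):
                merged[k] = merge_list([merged[k], v], dedup)
            else:
                merged[k] = v
    return merged
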
def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = dict()

    result = super().run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    root_key = ""

    if self._task.args:
        if "root_key" in self._task.args:
            n = self._task.args.get("root_key")
            n = self._templar.template(n)
            if not isidentifier(n):
                raise AnsibleActionFail("The argument 'root_key' value of '%s' is not valid. Keys must start with a letter or underscore character, "
                                        "and contain only letters, numbers and underscores." % n)
            root_key = n

        if "templates" in self._task.args:
            t = self._task.args.get("templates")
            if isinstance(t, list):
                template_list = t
            else:
                raise AnsibleActionFail("The argument 'templates' is not a list")
        else:
            raise AnsibleActionFail("The argument 'templates' must be set")
    else:
        raise AnsibleActionFail("The argument 'templates' must be set")

    output = dict()
    template_lookup_module = TemplateLookupModule(loader=self._loader, templar=self._templar)
    template_vars = task_vars

    for template_item in template_list:
        template = template_item.get('template')
        if not template:
            raise AnsibleActionFail("Invalid template data")

        template_options = template_item.get('options', {})
        list_merge = template_options.get('list_merge', 'append')
        strip_empty_keys = template_options.get('strip_empty_keys', True)

        if root_key:
            template_vars[root_key] = output
        else:
            template_vars = combine(task_vars, output, recursive=True)

        template_output = template_lookup_module.run([template], template_vars)
        template_output_data = yaml.safe_load(template_output[0])

        if strip_empty_keys:
            template_output_data = strip_null_from_data(template_output_data)

        if template_output_data:
            output = combine(output, template_output_data, recursive=True, list_merge=list_merge)

    if root_key:
        result['ansible_facts'] = {root_key: output}
    else:
        result['ansible_facts'] = output

    return result

def run(self, tmp=None, task_vars=None):
    if task_vars is None:
        task_vars = {}

    result = super().run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    root_key = ""

    if self._task.args:
        cprofile_file = self._task.args.get("cprofile_file")
        if cprofile_file:
            profiler = cProfile.Profile()
            profiler.enable()

        if "root_key" in self._task.args:
            n = self._task.args.get("root_key")
            n = self._templar.template(n)
            if not isidentifier(n):
                raise AnsibleActionFail(
                    f"The argument 'root_key' value of '{n}' is not valid. Keys must start with a letter or underscore character, "
                    "and contain only letters, numbers and underscores."
                )
            root_key = n

        if "templates" in self._task.args:
            t = self._task.args.get("templates")
            if isinstance(t, list):
                template_list = t
            else:
                raise AnsibleActionFail("The argument 'templates' is not a list")
        else:
            raise AnsibleActionFail("The argument 'templates' must be set")

        dest = self._task.args.get("dest", False)
        template_output = self._task.args.get("template_output", False)
        debug = self._task.args.get("debug", False)
        remove_avd_switch_facts = self._task.args.get("remove_avd_switch_facts", False)
    else:
        raise AnsibleActionFail("The argument 'templates' must be set")

    output = {}
    template_lookup_module = lookup_loader.get('ansible.builtin.template', loader=self._loader, templar=self._templar)
    template_vars = task_vars

    # If the argument 'debug' is set, a 'avd_yaml_templates_to_facts_debug' list will be added to the output.
    # This list contains timestamps from every step for every template. This is useful for identifying slow templates.
    # Here we pull in the list from any previous tasks, so we can just add to the list.
    if debug:
        avd_yaml_templates_to_facts_debug = template_vars.get('avd_yaml_templates_to_facts_debug', [])

    for template_item in template_list:
        if debug:
            debug_item = template_item
            debug_item['timestamps'] = {"starting": datetime.now()}

        template = template_item.get('template')
        if not template:
            raise AnsibleActionFail("Invalid template data")

        template_options = template_item.get('options', {})
        list_merge = template_options.get('list_merge', 'append')
        strip_empty_keys = template_options.get('strip_empty_keys', True)

        # If the argument 'root_key' is set, output will be assigned to this variable. If not set, the output will be set as "root" variables.
        # Here we combine the previous output with the input task_vars, to be able to use variables generated by the previous template in the next.
        if root_key:
            template_vars[root_key] = output
        else:
            template_vars = combine(task_vars, output, recursive=True)

        if debug:
            debug_item['timestamps']['run_template'] = datetime.now()

        # Here we parse the template, expecting the result to be a YAML formatted string
        template_result = template_lookup_module.run([template], template_vars)

        if debug:
            debug_item['timestamps']['load_yaml'] = datetime.now()

        # Load data from the template result.
        template_result_data = yaml.safe_load(template_result[0])

        # If the argument 'strip_empty_keys' is set, remove keys with value of null / None from the resulting dict (recursively).
        if strip_empty_keys:
            if debug:
                debug_item['timestamps']['strip_empty_keys'] = datetime.now()
            template_result_data = strip_null_from_data(template_result_data)

        # If there is any data produced by the template, combine it on top of previous output.
        if template_result_data:
            if debug:
                debug_item['timestamps']['combine_data'] = datetime.now()
            output = combine(output, template_result_data, recursive=True, list_merge=list_merge)

        if debug:
            debug_item['timestamps']['done'] = datetime.now()
            avd_yaml_templates_to_facts_debug.append(debug_item)

    # If the argument 'template_output' is set, run the output data through another jinja2 rendering.
    # This is to resolve any input values with inline jinja using variables/facts set by the input templates.
    if template_output:
        if debug:
            debug_item = {'action': 'template_output', 'timestamps': {'combine_data': datetime.now()}}

        if root_key:
            template_vars[root_key] = output
        else:
            template_vars = combine(task_vars, output, recursive=True)

        if debug:
            debug_item['timestamps']['templating'] = datetime.now()

        self._templar.available_variables = template_vars
        output = self._templar.template(output)

        if debug:
            debug_item['timestamps']['done'] = datetime.now()
            avd_yaml_templates_to_facts_debug.append(debug_item)

    # If the argument 'dest' is set, write the output data to a file.
    if dest:
        if debug:
            debug_item = {'action': 'dest', 'dest': dest, 'timestamps': {'write_file': datetime.now()}}

        # Depending on the file suffix of 'dest' (default: 'json') we will format the data to yaml or just write the output data directly.
        # The Copy module used in 'write_file' will convert the output data to json automatically.
        if dest.split('.')[-1] in ["yml", "yaml"]:
            write_file_result = self.write_file(yaml.dump(output, indent=2, sort_keys=False, width=130), task_vars)
        else:
            write_file_result = self.write_file(output, task_vars)

        # Overwrite result with the result from the copy operation (setting 'changed' flag accordingly)
        result.update(write_file_result)

        if debug:
            debug_item['timestamps']['done'] = datetime.now()
            avd_yaml_templates_to_facts_debug.append(debug_item)

    # If 'dest' is not set, hardcode 'changed' to true, since we don't know if something changed and later tasks may depend on this.
    else:
        result['changed'] = True

    if debug:
        output['avd_yaml_templates_to_facts_debug'] = avd_yaml_templates_to_facts_debug

    # If the argument 'root_key' is set, output will be assigned to this variable. If not set, the output will be set as "root" variables.
    if root_key:
        result['ansible_facts'] = {root_key: output}
    else:
        result['ansible_facts'] = output

    if remove_avd_switch_facts:
        result['ansible_facts']['avd_switch_facts'] = None

    if cprofile_file:
        profiler.disable()
        stats = pstats.Stats(profiler).sort_stats('cumtime')
        stats.dump_stats(cprofile_file)

    return result

def _execute(self, variables=None):
    '''
    The primary workhorse of the executor system, this runs the task
    on the specified host (which may be the delegated_to host) and handles
    the retry/until and block rescue/always execution
    '''

    if variables is None:
        variables = self._job_vars

    templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

    context_validation_error = None
    try:
        # apply the given task's information to the connection info,
        # which may override some fields already set by the play or
        # the options specified on the command line
        self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        self._play_context.post_validate(templar=templar)

        # now that the play context is finalized, if the remote_addr is not set
        # default to using the host's address field as the remote address
        if not self._play_context.remote_addr:
            self._play_context.remote_addr = self._host.address

        # We also add "magic" variables back into the variables dict to make sure
        # a certain subset of variables exist.
        self._play_context.update_vars(variables)

        # FIXME: update connection/shell plugin options
    except AnsibleError as e:
        # save the error, which we'll raise later if we don't end up
        # skipping this task during the conditional evaluation step
        context_validation_error = e

    # Evaluate the conditional (if any) for this task, which we do before running
    # the final task post-validation. We do this before the post validation due to
    # the fact that the conditional may specify that the task be skipped due to a
    # variable not being present which would otherwise cause validation to fail
    try:
        if not self._task.evaluate_conditional(templar, variables):
            display.debug("when evaluation is False, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
    except AnsibleError as e:
        # loop error takes precedence
        if self._loop_eval_error is not None:
            # Display the error from the conditional as well to prevent
            # losing information useful for debugging.
            display.v(to_text(e))
            raise self._loop_eval_error  # pylint: disable=raising-bad-type
        raise

    # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
    if self._loop_eval_error is not None:
        raise self._loop_eval_error  # pylint: disable=raising-bad-type

    # if we ran into an error while setting up the PlayContext, raise it now
    if context_validation_error is not None:
        raise context_validation_error  # pylint: disable=raising-bad-type

    # if this task is a TaskInclude, we just return now with a success code so the
    # main thread can expand the task list for the given host
    if self._task.action in ('include', 'include_tasks'):
        include_args = self._task.args.copy()
        include_file = include_args.pop('_raw_params', None)
        if not include_file:
            return dict(failed=True, msg="No include file was specified to the include")

        include_file = templar.template(include_file)
        return dict(include=include_file, include_args=include_args)

    # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
    elif self._task.action == 'include_role':
        include_args = self._task.args.copy()
        return dict(include_args=include_args)

    # Now we do final validation on the task, which sets all fields to their final values.
    try:
        self._task.post_validate(templar=templar)
    except AnsibleError:
        raise
    except Exception:
        return dict(changed=False, failed=True, _ansible_no_log=self._play_context.no_log, exception=to_text(traceback.format_exc()))

    if '_variable_params' in self._task.args:
        variable_params = self._task.args.pop('_variable_params')
        if isinstance(variable_params, dict):
            if C.INJECT_FACTS_AS_VARS:
                display.warning("Using a variable for a task's 'args' is unsafe in some situations "
                                "(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
            variable_params.update(self._task.args)
            self._task.args = variable_params

    # get the connection and the handler for this execution
    if (not self._connection or
            not getattr(self._connection, 'connected', False) or
            self._play_context.remote_addr != self._connection._play_context.remote_addr):
        self._connection = self._get_connection(variables=variables, templar=templar)
    else:
        # if connection is reused, its _play_context is no longer valid and needs
        # to be replaced with the one templated above, in case other data changed
        self._connection._play_context = self._play_context

    if self._task.delegate_to:
        # use vars from delegated host (which already include task vars) instead of original host
        delegated_vars = variables.get('ansible_delegated_vars', {}).get(self._task.delegate_to, {})
        orig_vars = templar.available_variables
        templar.available_variables = delegated_vars
        plugin_vars = self._set_connection_options(delegated_vars, templar)
        templar.available_variables = orig_vars
    else:
        # just use normal host vars
        plugin_vars = self._set_connection_options(variables, templar)

    # get handler
    self._handler = self._get_action_handler(connection=self._connection, templar=templar)

    # Apply default params for action/module, if present
    self._task.args = get_action_args_with_defaults(
        self._task.action, self._task.args, self._task.module_defaults, templar, self._task._ansible_internal_redirect_list
    )

    # And filter out any fields which were set to default(omit), and got the omit token value
    omit_token = variables.get('omit')
    if omit_token is not None:
        self._task.args = remove_omit(self._task.args, omit_token)

    # Read some values from the task, so that we can modify them if need be
    if self._task.until:
        retries = self._task.retries
        if retries is None:
            retries = 3
        elif retries <= 0:
            retries = 1
        else:
            retries += 1
    else:
        retries = 1

    delay = self._task.delay
    if delay < 0:
        delay = 1

    # make a copy of the job vars here, in case we need to update them
    # with the registered variable value later on when testing conditions
    vars_copy = variables.copy()

    display.debug("starting attempt loop")
    result = None
    for attempt in xrange(1, retries + 1):
        display.debug("running the handler")
        try:
            if self._task.timeout:
                old_sig = signal.signal(signal.SIGALRM, task_timeout)
                signal.alarm(self._task.timeout)
            result = self._handler.run(task_vars=variables)
        except AnsibleActionSkip as e:
            return dict(skipped=True, msg=to_text(e))
        except AnsibleActionFail as e:
            return dict(failed=True, msg=to_text(e))
        except AnsibleConnectionFailure as e:
            return dict(unreachable=True, msg=to_text(e))
        except TaskTimeoutError as e:
            msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
            return dict(failed=True, msg=msg)
        finally:
            if self._task.timeout:
                signal.alarm(0)
                old_sig = signal.signal(signal.SIGALRM, old_sig)
            self._handler.cleanup()
        display.debug("handler run complete")

        # preserve no log
        result["_ansible_no_log"] = self._play_context.no_log

        # update the local copy of vars with the registered value, if specified,
        # or any facts which may have been generated by the module execution
        if self._task.register:
            if not isidentifier(self._task.register):
                raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % self._task.register)

            vars_copy[self._task.register] = result = wrap_var(result)

        if self._task.async_val > 0:
            if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
                # FIXME callback 'v2_runner_on_async_poll' here

            # ensure no log is preserved
            result["_ansible_no_log"] = self._play_context.no_log

        # helper methods for use below in evaluating changed/failed_when
        def _evaluate_changed_when_result(result):
            if self._task.changed_when is not None and self._task.changed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.changed_when
                result['changed'] = cond.evaluate_conditional(templar, vars_copy)

        def _evaluate_failed_when_result(result):
            if self._task.failed_when:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.failed_when
                failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                result['failed_when_result'] = result['failed'] = failed_when_result
            else:
                failed_when_result = False
            return failed_when_result

        if 'ansible_facts' in result:
            if self._task.action in ('set_fact', 'include_vars'):
                vars_copy.update(result['ansible_facts'])
            else:
                # TODO: cleaning of facts should eventually become part of taskresults instead of vars
                af = wrap_var(result['ansible_facts'])
                vars_copy.update(namespace_facts(af))
                if C.INJECT_FACTS_AS_VARS:
                    vars_copy.update(clean_facts(af))

        # set the failed property if it was missing.
        if 'failed' not in result:
            # rc is here for backwards compatibility and modules that use it instead of 'failed'
            if 'rc' in result and result['rc'] not in [0, "0"]:
                result['failed'] = True
            else:
                result['failed'] = False

        # Make attempts and retries available early to allow their use in changed/failed_when
        if self._task.until:
            result['attempts'] = attempt

        # set the changed property if it was missing.
        if 'changed' not in result:
            result['changed'] = False

        # re-update the local copy of vars with the registered value, if specified,
        # or any facts which may have been generated by the module execution
        # This gives changed/failed_when access to additional recently modified
        # attributes of result
        if self._task.register:
            vars_copy[self._task.register] = result = wrap_var(result)

        # if we didn't skip this task, use the helpers to evaluate the changed/
        # failed_when properties
        if 'skipped' not in result:
            _evaluate_changed_when_result(result)
            _evaluate_failed_when_result(result)

        if retries > 1:
            cond = Conditional(loader=self._loader)
            cond.when = self._task.until
            if cond.evaluate_conditional(templar, vars_copy):
                break
            else:
                # no conditional check, or it failed, so sleep for the specified time
                if attempt < retries:
                    result['_ansible_retry'] = True
                    result['retries'] = retries
                    display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                    self._final_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
                    time.sleep(delay)
                    self._handler = self._get_action_handler(connection=self._connection, templar=templar)
    else:
        if retries > 1:
            # we ran out of attempts, so mark the result as failed
            result['attempts'] = retries - 1
            result['failed'] = True

    # do the final update of the local variables here, for both registered
    # values and any facts which may have been created
    if self._task.register:
        variables[self._task.register] = result = wrap_var(result)

    if 'ansible_facts' in result:
        if self._task.action in ('set_fact', 'include_vars'):
            variables.update(result['ansible_facts'])
        else:
            # TODO: cleaning of facts should eventually become part of taskresults instead of vars
            af = wrap_var(result['ansible_facts'])
            variables.update(namespace_facts(af))
            if C.INJECT_FACTS_AS_VARS:
                variables.update(clean_facts(af))

    # save the notification target in the result, if it was specified, as
    # this task may be running in a loop in which case the notification
    # may be item-specific, ie. "notify: service {{item}}"
    if self._task.notify is not None:
        result['_ansible_notify'] = self._task.notify

    # add the delegated vars to the result, so we can reference them
    # on the results side without having to do any further templating
    if self._task.delegate_to:
        result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
        for k in plugin_vars:
            result["_ansible_delegated_vars"][k] = delegated_vars.get(k)

    # and return
    display.debug("attempt loop complete, returning result")
    return result

def run(self, tmp=None, task_vars=None):
    suffix_to_merge = self._task.args.get('suffix_to_merge', '')
    merged_var_name = self._task.args.get('merged_var_name', '')
    dedup = self._task.args.get('dedup', True)
    expected_type = self._task.args.get('expected_type')
    recursive_dict_merge = bool(self._task.args.get('recursive_dict_merge', False))

    if 'cacheable' in self._task.args.keys():
        display.deprecated(
            "The `cacheable` option does not actually do anything, since Ansible 2.5. "
            "No matter what, the variable set by this plugin will be set in the fact "
            "cache if you have fact caching enabled. To get rid of this warning, "
            "remove the `cacheable` argument from your merge_vars task. This warning "
            "will be removed in a future version of this plugin.")

    # Validate args
    if expected_type not in ['dict', 'list']:
        raise AnsibleError("expected_type must be set ('dict' or 'list').")
    if not merged_var_name:
        raise AnsibleError("merged_var_name must be set")
    if not isidentifier(merged_var_name):
        raise AnsibleError("merged_var_name '%s' is not a valid identifier" % merged_var_name)
    if not suffix_to_merge.endswith('__to_merge'):
        raise AnsibleError("Merge suffix must end with '__to_merge', sorry!")

    keys = sorted([key for key in task_vars.keys() if key.endswith(suffix_to_merge)])
    display.v("Merging vars in this order: {}".format(keys))

    # We need to render any jinja in the merged var now, because once it
    # leaves this plugin, ansible will cleanse it by turning any jinja tags
    # into comments.
    # And we need it done before merging the variables,
    # in case any structured data is specified with templates.
    merge_vals = [self._templar.template(task_vars[key]) for key in keys]

    # Dispatch based on type that we're merging
    if merge_vals == []:
        if expected_type == 'list':
            merged = []
        else:
            merged = {}
    elif isinstance(merge_vals[0], list):
        merged = merge_list(merge_vals, dedup)
    elif isinstance(merge_vals[0], dict):
        merged = merge_dict(merge_vals, dedup, recursive_dict_merge)
    else:
        raise AnsibleError("Don't know how to merge variables of type: {}".format(type(merge_vals[0])))

    return {
        'ansible_facts': {merged_var_name: merged},
        'changed': False,
    }

def test_valid_identifier(identifier):
    assert isidentifier(identifier)

def test_invalid_identifier(identifier):
    assert not isidentifier(identifier)

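# test_valid_identifier and test_invalid_identifier take `identifier` as an argument,
# which suggests they are driven by pytest parametrization. The lists below are assumed
# example inputs, not the project's actual test data; the import path for isidentifier
# is the one used by Ansible's utilities and is an assumption in this sketch.
import pytest

from ansible.utils.vars import isidentifier


@pytest.mark.parametrize("identifier", ["foo", "foo1_23", "_bar"])
def test_valid_identifier_example(identifier):
    assert isidentifier(identifier)


@pytest.mark.parametrize("identifier", ["pass", "foo ", " foo", "1234", "foo bar", "no-dashes"])
def test_invalid_identifier_example(identifier):
    assert not isidentifier(identifier)
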
def test_non_ascii():
    """In Python 3 non-ascii characters are allowed as opposed to Python 2.

    The isidentifier method rejects them on both Python 2 and 3, so that
    variable name validation behaves the same across versions.
    """
    assert not isidentifier("křížek")

def run(self, tmp=None, task_vars=None):
    suffix_to_merge = self._task.args.get('suffix_to_merge', '')
    merged_var_name = self._task.args.get('merged_var_name', '')
    expected_type = self._task.args.get('expected_type', 'dict')
    additional_merge_tree = self._task.args.get('additional_merge_tree', [])
    additional_merge_by_key = self._task.args.get('additional_merge_by_key', [])
    all_keys = task_vars.keys()

    if not merged_var_name:
        raise AnsibleError("merged_var_name must be set")
    if not isidentifier(merged_var_name):
        raise AnsibleError("merged_var_name '%s' is not a valid identifier" % merged_var_name)

    keys = sorted([key for key in task_vars.keys() if key.endswith(suffix_to_merge)])
    display.v("Merging vars in this order: {}".format(keys))

    merge_vals = [self._templar.template(task_vars[key]) for key in keys]

    merged = None
    if expected_type == 'list':
        merged = list(itertools.chain.from_iterable(merge_vals))
    else:
        merged = {}
        for val in merge_vals:
            dpath.util.merge(dst=merged, src=val, flags=dpath.util.MERGE_ADDITIVE)

    for k in additional_merge_tree:
        try:
            r = dpath.util.get(merged, k)
            m = {}
            if isinstance(r, list):
                for d in r:
                    if isinstance(d, dict):
                        display.v("Merging values {} in path {}".format(d, k))
                        dpath.util.merge(dst=m, src=d, flags=dpath.util.MERGE_ADDITIVE)
                if m:
                    display.v("Set merged values {} to path {}".format(m, k))
                    dpath.util.set(merged, k, [m])
            else:
                display.warning("Merge not implemented for the following type {} at path {}".format(type(r), k))
        except KeyError as e:
            display.warning("Key doesn't exist: {}".format(e))

    for k in additional_merge_by_key:
        try:
            key, merge_by = list(k.items())[0]

            def keyfunc(x):
                return x[merge_by]

            m = []
            data = sorted(dpath.util.get(merged, key), key=keyfunc)
            for merge_value, group in itertools.groupby(data, keyfunc):
                mm = {}
                display.v("Merging path {} by key {} and value {}".format(key, merge_by, merge_value))
                for g in list(group):
                    dpath.util.merge(dst=mm, src=g, flags=dpath.util.MERGE_ADDITIVE)
                m.append(mm)
            if m:
                dpath.util.new(merged, key, m)
        except KeyError as e:
            display.warning("Key {} doesn't exist in {}".format(e, k))

    return {
        'ansible_facts': {merged_var_name: merged},
        'ansible_facts_cacheable': False,
        'changed': False,
    }
