    def _save_ssh_host_keys(self, filename):
        '''
        Not using the paramiko save_ssh_host_keys function, as we want to append
        newly added SSH keys at the bottom of the file instead of rewriting the
        existing entries in a different order.
        '''

        if not self._any_keys_added():
            return False

        path = os.path.expanduser("~/.ssh")
        makedirs_safe(path)

        f = open(filename, 'w')

        for hostname, keys in iteritems(self.ssh._host_keys):

            for keytype, key in iteritems(keys):

                # write keys that were already present first, so the existing
                # entries keep their original order
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if not added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        for hostname, keys in iteritems(self.ssh._host_keys):

            for keytype, key in iteritems(keys):
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))

        f.close()

    def _any_keys_added(self):

        for hostname, keys in iteritems(self.ssh._host_keys):
            for keytype, key in iteritems(keys):
                added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                if added_this_time:
                    return True
        return False

    def _get_magic_variables(self, loader, play, host, task, include_hostvars, include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = dict()
        variables['playbook_dir'] = loader.get_basedir()

        if host:
            variables['group_names'] = sorted([group.name for group in host.get_groups() if group.name != 'all'])

            if self._inventory is not None:
                variables['groups'] = dict()
                for (group_name, group) in iteritems(self._inventory.groups):
                    variables['groups'][group_name] = [h.name for h in group.get_hosts()]
        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['inventory_dir'] = self._inventory.basedir()
            variables['inventory_file'] = self._inventory.src()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                # DEPRECATED: play_hosts should be deprecated in favor of ansible_play_hosts,
                #             however this would take work in the templating engine, so for now
                #             we'll add both so we can give users something transitional to use
                host_list = [x.name for x in self._inventory.get_hosts()]
                variables['play_hosts'] = host_list
                variables['ansible_play_hosts'] = host_list

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        variables['ansible_version'] = CLI.version_info(gitinfo=False)
        # Set options vars
        for option, option_value in iteritems(self._options_vars):
            variables[option] = option_value

        if self._hostvars is not None and include_hostvars:
            variables['hostvars'] = self._hostvars

        return variables
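
    # Illustrative subset of the magic variables returned above (hypothetical
    # values; the real dict is built from the current loader, inventory and play):
    #   playbook_dir   -> '/home/user/playbooks'
    #   group_names    -> ['dbservers', 'webservers']
    #   groups         -> {'all': ['db01', 'web01'], 'webservers': ['web01']}
    #   role_names     -> ['common', 'nginx']
    #   inventory_dir  -> '/home/user/playbooks/inventory'
    #   omit           -> '__omit_place_holder__<random token>'
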
    def preprocess_data(self, ds):
        '''
        Reorganizes the data for a PlaybookInclude datastructure so it lines up
        with the attributes we expect
        '''

        assert isinstance(ds, dict)

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        for (k,v) in iteritems(ds):
            if k == 'include':
                self._preprocess_include(ds, new_ds, k, v)
            else:
                # some basic error checking, to make sure vars are properly
                # formatted and do not conflict with k=v parameters
                if k == 'vars':
                    if 'vars' in new_ds:
                        raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
                    elif not isinstance(v, dict):
                        raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds)
                new_ds[k] = v

        return super(PlaybookInclude, self).preprocess_data(new_ds)
Example #5
def merge_hash(a, b):
    """
    Recursively merges hash b into a so that keys from b take precedence over keys from a
    """

    _validate_mutable_mappings(a, b)

    # if a is empty or equal to b, return b
    if a == {} or a == b:
        return b.copy()

    # if b is empty the below unfolds quickly
    result = a.copy()

    # next, iterate over b keys and values
    for k, v in iteritems(b):
        # if there's already such key in a
        # and that key contains a MutableMapping
        if k in result and isinstance(result[k], MutableMapping) and isinstance(v, MutableMapping):
            # merge those dicts recursively
            result[k] = merge_hash(result[k], v)
        else:
            # otherwise, just copy the value from b to a
            result[k] = v

    return result
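
# A minimal usage sketch for merge_hash() above (illustrative values only):
# keys from `b` take precedence, and nested dicts are merged recursively.
defaults = {'retries': 3, 'ssh': {'port': 22, 'user': 'root'}}
overrides = {'ssh': {'user': 'deploy'}, 'verbose': True}
merged = merge_hash(defaults, overrides)
# merged == {'retries': 3, 'ssh': {'port': 22, 'user': 'deploy'}, 'verbose': True}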
Example #6
    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and stores
        them in a dictionary of params for parsing later
        '''

        role_def = dict()
        role_params = dict()
        base_attribute_names = frozenset(self._valid_attrs.keys())
        for (key, value) in iteritems(ds):
            # use the list of FieldAttribute values to determine what is and is not
            # an extra parameter for this role (or sub-class of this role)
            # FIXME: hard-coded list of exception key names here corresponds to the
            #        connection fields in the Base class. There may need to be some
            #        other mechanism where we exclude certain kinds of field attributes,
            #        or make this list more automatic in some way so we don't have to
            #        remember to update it manually.
            if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
                if key in ('connection', 'port', 'remote_user'):
                    display.deprecated("Using '%s' as a role param has been deprecated. " % key + \
                                       "In the future, these values should be entered in the `vars:` " + \
                                       "section for roles, but for now we'll store it as both a param and an attribute.")
                    role_def[key] = value
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value

        return (role_def, role_params)
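
# A standalone sketch of the split performed above, assuming a fixed set of
# field-attribute names (hypothetical; the real set comes from self._valid_attrs):
def split_role_params_sketch(ds, base_attribute_names=frozenset(('role', 'when', 'tags'))):
    role_def = {}
    role_params = {}
    for key, value in ds.items():
        if key in base_attribute_names:
            role_def[key] = value      # recognized field attribute
        else:
            role_params[key] = value   # anything else is treated as a role param
    return role_def, role_params

# split_role_params_sketch({'role': 'nginx', 'listen_port': 8080})
# -> ({'role': 'nginx'}, {'listen_port': 8080})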
Example #7
    def run(self, tmp=None, task_vars=None):
        ''' transfer the given module name, plus the async module, then run it '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = 'check mode not supported for this module'
            return result

        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
        if not tmp:
            tmp = self._make_tmp_path(remote_user)
            self._cleanup_remote_tmp = True

        module_name = self._task.action

        env_string = self._compute_environment_string()

        module_args = self._task.args.copy()
        if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG:
            module_args['_ansible_no_log'] = True

        # configure, upload, and chmod the target module
        (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        remote_module_filename = self._connection._shell.get_remote_filename(module_path)
        remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
        if module_style == 'binary':
            self._transfer_file(module_path, remote_module_path)
        else:
            self._transfer_data(remote_module_path, module_data)

        # configure, upload, and chmod the async_wrapper module
        (async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(module_name='async_wrapper', module_args=dict(), task_vars=task_vars)
        async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
        remote_async_module_path = self._connection._shell.join_path(tmp, async_module_remote_filename)
        self._transfer_data(remote_async_module_path, async_module_data)

        argsfile = None
        if module_style in ('non_native_want_json', 'binary'):
            argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), json.dumps(module_args))
        elif module_style == 'old':
            args_data = ""
            for k, v in iteritems(module_args):
                args_data += '%s="%s" ' % (k, pipes.quote(to_text(v)))
            argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), args_data)

        remote_paths = (tmp, remote_module_path, remote_async_module_path)

        # argsfile doesn't need to be executable, but this saves an extra call to the remote host
        if argsfile:
            remote_paths += (argsfile,)

        self._fixup_perms2(remote_paths, remote_user, execute=True)

        async_limit = self._task.async

    @staticmethod
    def load(role_include, play, parent_role=None):
        try:
            # The ROLE_CACHE is a dictionary of role names, with each entry
            # containing another dictionary corresponding to a set of parameters
            # specified for a role as the key and the Role() object itself.
            # We use frozenset to make the dictionary hashable.

            params = role_include.get_role_params()
            if role_include.when is not None:
                params['when'] = role_include.when
            if role_include.tags is not None:
                params['tags'] = role_include.tags
            hashed_params = hash_params(params)
            if role_include.role in play.ROLE_CACHE:
                for (entry, role_obj) in iteritems(play.ROLE_CACHE[role_include.role]):
                    if hashed_params == entry:
                        if parent_role:
                            role_obj.add_parent(parent_role)
                        return role_obj

            r = Role(play=play)
            r._load_role_data(role_include, parent_role=parent_role)

            if role_include.role not in play.ROLE_CACHE:
                play.ROLE_CACHE[role_include.role] = dict()

            play.ROLE_CACHE[role_include.role][hashed_params] = r
            return r

        except RuntimeError:
            raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles", obj=role_include._ds)
Example #9
    def preprocess_data(self, ds):
        '''
        Tasks are especially complex arguments, so they need pre-processing.
        Keep this short.
        '''

        assert isinstance(ds, dict)

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure suitable for the
        # attributes of the task class
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # use the args parsing class to determine the action, args,
        # and the delegate_to value from the various possible forms
        # supported as legacy
        args_parser = ModuleArgsParser(task_ds=ds)
        (action, args, delegate_to) = args_parser.parse()

        new_ds['action']      = action
        new_ds['args']        = args
        new_ds['delegate_to'] = delegate_to

        # we handle any 'vars' specified in the ds here, as we may
        # be adding things to them below (special handling for includes).
        # When that deprecated feature is removed, this can be too.
        if 'vars' in ds:
            # _load_vars is defined in Base, and is used to load a dictionary
            # or list of dictionaries in a standard way
            new_ds['vars'] = self._load_vars(None, ds.pop('vars'))
        else:
            new_ds['vars'] = dict()

        for (k,v) in iteritems(ds):
            if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
                # we don't want to re-assign these values, which were
                # determined by the ModuleArgsParser() above
                continue
            elif k.replace("with_", "") in lookup_loader:
                self._preprocess_loop(ds, new_ds, k, v)
            else:
                # pre-2.0 syntax allowed variables for include statements at the
                # top level of the task, so we move those into the 'vars' dictionary
                # here, and show a deprecation message as we will remove this at
                # some point in the future.
                if action == 'include' and k not in self._get_base_attributes() and k not in self.DEPRECATED_ATTRIBUTES:
                    self._display.deprecated("Specifying include variables at the top-level of the task is deprecated. Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\nfor currently supported syntax regarding included files and variables")
                    new_ds['vars'][k] = v
                else:
                    new_ds[k] = v

        return super(Task, self).preprocess_data(new_ds)
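
    # Illustrative sketch of the preprocessing above (hypothetical task data):
    # a legacy task such as
    #     {'copy': 'src=a dest=b', 'with_items': ['a', 'b'], 'when': 'x is defined'}
    # comes out roughly as
    #     {'action': 'copy', 'args': {'src': 'a', 'dest': 'b'}, 'delegate_to': None,
    #      'vars': {}, 'loop': 'items', 'loop_args': ['a', 'b'], 'when': 'x is defined'}
    # with the module line parsed by ModuleArgsParser() and the with_* form handled
    # by _preprocess_loop().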
Example #10
    def _merge_kv(self, ds):
        if ds is None:
            return ""
        elif isinstance(ds, string_types):
            return ds
        elif isinstance(ds, dict):
            buf = ""
            for (k, v) in iteritems(ds):
                if k.startswith('_'):
                    continue
                buf = buf + "%s=%s " % (k, v)
            buf = buf.strip()
            return buf
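
# An illustrative standalone version of the _merge_kv() logic above, so it can be
# exercised outside the class (assumption: plain Python 3 strings; the original
# checks against six's string_types):
def merge_kv_sketch(ds):
    if ds is None:
        return ""
    elif isinstance(ds, str):
        return ds
    elif isinstance(ds, dict):
        # keys starting with '_' are treated as internal and skipped
        return " ".join("%s=%s" % (k, v) for k, v in ds.items() if not k.startswith('_'))

# merge_kv_sketch({'src': '/etc/motd', 'dest': '/tmp/motd', '_internal': 1})
# -> 'src=/etc/motd dest=/tmp/motd'
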
    def test_variable_manager_extra_vars(self):
        fake_loader = DictDataLoader({})

        extra_vars = dict(a=1, b=2, c=3)
        v = VariableManager()
        v.extra_vars = extra_vars

        vars = v.get_vars(loader=fake_loader, use_cache=False)

        for (key, val) in iteritems(extra_vars):
            self.assertEqual(vars.get(key), val)

        self.assertIsNot(v.extra_vars, extra_vars)

    def run(self, tmp=None, task_vars=dict()):
        facts = dict()
        if self._task.args:
            for (k, v) in iteritems(self._task.args):
                k = self._templar.template(k)

                if not isidentifier(k):
                    return dict(failed=True, msg="The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only letters, numbers and underscores." % k)

                if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
                    v = boolean(v)
                facts[k] = v
        return dict(changed=False, ansible_facts=facts)
Example #13
    def get_group_dict(self):
        """
        In get_vars() we merge a 'magic' dictionary 'groups' with group name
        keys and hostname list values into every host variable set.

        Cache the creation of this structure here
        """

        if not self._group_dict_cache:
            for (group_name, group) in iteritems(self.groups):
                self._group_dict_cache[group_name] = [h.name for h in group.get_hosts()]

        return self._group_dict_cache
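
    # Illustrative shape of the cached mapping built above (hypothetical inventory):
    #   {'all': ['db01', 'web01'], 'webservers': ['web01'], 'dbservers': ['db01']}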
Example #14
    def __init__(self, templar, globals, locals=None, *extras):
        '''
        Initializes this object with a valid Templar() object, as
        well as several dictionaries of variables representing
        different scopes (in jinja2 terminology).
        '''

        self._templar = templar
        self._globals = globals
        self._extras  = extras
        self._locals  = dict()
        if isinstance(locals, dict):
            for key, val in iteritems(locals):
                if key[:2] == 'l_' and val is not missing:
                    self._locals[key[2:]] = val
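
# A minimal sketch of the locals handling above (hypothetical values): jinja2
# passes template locals prefixed with 'l_', and only values that are actually
# defined (i.e. not the jinja2 `missing` sentinel) are kept.
from jinja2.utils import missing

locals_in = {'l_item': 'web01', 'l_undefined': missing, 'other': 1}
kept = dict((k[2:], v) for k, v in locals_in.items() if k[:2] == 'l_' and v is not missing)
# kept == {'item': 'web01'}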
Example #15
def hash_params(params):
    if not isinstance(params, dict):
        return params
    else:
        s = set()
        for k,v in iteritems(params):
            if isinstance(v, dict):
                s.update((k, hash_params(v)))
            elif isinstance(v, list):
                things = []
                for item in v:
                    things.append(hash_params(item))
                s.update((k, tuple(things)))
            else:
                s.update((k, v))
        return frozenset(s)
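
# A minimal usage sketch for hash_params() above (illustrative values only): the
# result is a frozenset, so it is hashable and can serve as a dictionary key,
# which is how the role cache indexes parameter combinations.
params = {'ntp_server': 'pool.ntp.org', 'tags': ['setup', 'ntp']}
key = hash_params(params)
cache = {key: 'cached role object would go here'}
assert hash_params({'tags': ['setup', 'ntp'], 'ntp_server': 'pool.ntp.org'}) in cache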
Example #16
    def validate(self, all_vars=dict()):
        ''' validation that is done at parse time, not load time '''

        # walk all fields in the object
        for (name, attribute) in iteritems(self._get_base_attributes()):

            # run validator only if present
            method = getattr(self, '_validate_%s' % name, None)
            if method:
                method(attribute, name, getattr(self, name))
            else:
                # and make sure the attribute is of the type it should be
                value = getattr(self, name)
                if value is not None:
                    if attribute.isa == 'string' and isinstance(value, (list, dict)):
                        raise AnsibleParserError("The field '%s' is supposed to be a string type,"
                                " however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds())
Example #17
    def deserialize(self, data):
        '''
        Given a dictionary of values, load up the field attributes for
        this object. As with serialize(), if there are any non-field
        attribute data members, this method will need to be overridden
        and extended.
        '''

        assert isinstance(data, dict)

        for (name, attribute) in iteritems(self._get_base_attributes()):
            if name in data:
                setattr(self, name, data[name])
            else:
                setattr(self, name, attribute.default)

        # restore the UUID field
        setattr(self, '_uuid', data.get('uuid'))
Example #18
    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and stores
        them in a dictionary of params for parsing later
        '''

        role_def = dict()
        role_params = dict()
        base_attribute_names = frozenset(self._get_base_attributes().keys())
        for (key, value) in iteritems(ds):
            # use the list of FieldAttribute values to determine what is and is not
            # an extra parameter for this role (or sub-class of this role)
            if key not in base_attribute_names:
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value

        return (role_def, role_params)
Example #19
    def load_data(self, ds, variable_manager=None, loader=None):
        ''' walk the input datastructure and assign any values '''

        assert ds is not None

        # cache the datastructure internally
        setattr(self, '_ds', ds)

        # the variable manager class is used to manage and merge variables
        # down to a single dictionary for reference in templating, etc.
        self._variable_manager = variable_manager

        # the data loader class is used to parse data from strings and files
        if loader is not None:
            self._loader = loader
        else:
            self._loader = DataLoader()

        # call the preprocess_data() function to massage the data into
        # something we can more easily parse, and then call the validation
        # function on it to ensure there are no incorrect key values
        ds = self.preprocess_data(ds)
        self._validate_attributes(ds)

        # Walk all attributes in the class. We sort them based on their priority
        # so that certain fields can be loaded before others, if they are dependent.
        for name, attr in sorted(iteritems(self._valid_attrs), key=operator.itemgetter(1)):
            # copy the value over unless a _load_field method is defined
            if name in ds:
                method = getattr(self, '_load_%s' % name, None)
                if method:
                    self._attributes[name] = method(name, ds[name])
                else:
                    self._attributes[name] = ds[name]

        # run early, non-critical validation
        self.validate()

        # return the constructed object
        return self
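
    # Note on the per-field hooks used above: a subclass can define a method named
    # _load_<attribute>() to transform the raw value before it is stored, e.g. a
    # (hypothetical) _load_tags(self, attr, ds) that splits a comma-separated string
    # into a list; fields without such a method are copied over unchanged.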
Example #20
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        facts = dict()
        if self._task.args:
            for (k, v) in iteritems(self._task.args):
                k = self._templar.template(k)

                if not isidentifier(k):
                    result['failed'] = True
                    result['msg'] = "The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only letters, numbers and underscores." % k
                    return result

                if isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
                    v = boolean(v)
                facts[k] = v

        result['changed'] = False
        result['ansible_facts'] = facts
        return result
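
# A minimal sketch of the fact normalization above (assumption: a toy stand-in for
# Ansible's boolean() helper, which accepts more spellings than this sketch does):
def boolean_sketch(value):
    return str(value).lower() in ('true', 'yes')

facts = {}
for k, v in {'is_primary': 'yes', 'max_clients': 200}.items():
    if isinstance(v, str) and v.lower() in ('true', 'false', 'yes', 'no'):
        v = boolean_sketch(v)
    facts[k] = v
# facts == {'is_primary': True, 'max_clients': 200}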
Example #21
    def merge_data(self, dest, extend=False):

        # Walk all attributes in the class
        for (name, attribute) in iteritems(self._get_base_attributes()):
            if name in self._excluded_keys:
                continue

            value = getattr(self, name)
            if value is None or name not in self.get_ds():
                continue

            new_value = getattr(dest, name)
            if new_value is None or name not in dest.get_ds():
                setattr(dest, name, value)
                continue

            # Determine if we can merge
            if name not in self._combine_keys or type(value) != type(new_value):
                if not extend:
                    setattr(dest, name, value)

            elif isinstance(value, dict):
                if extend:
                    setattr(dest, name, merge_hash(new_value, value))
                else:
                    setattr(dest, name, merge_hash(value, new_value))

            elif isinstance(value, list):
                if extend:
                    new_value.extend(value)
                    setattr(dest, name, new_value)
                else:
                    value.extend(new_value)
                    setattr(dest, name, value)

            elif not extend:
                setattr(dest, name, value)
Example #22
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader,
                          shared_loader_obj=self._shared_loader_obj,
                          variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(
                task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation failed, skipping this task")
                return dict(changed=False,
                            skipped=True,
                            skip_reason='Conditional check failed',
                            _ansible_no_log=self._play_context.no_log)
        except AnsibleError:
            # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
            if self._task.action != 'include':
                raise

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True,
                            msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file,
                        include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                display.deprecated(
                    "Using variables for task params is unsafe, especially if the variables come from an external source like facts"
                )
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        self._connection = self._get_connection(variables=variables,
                                                templar=templar)
        self._connection.set_host_overrides(host=self._host)

        self._handler = self._get_action_handler(connection=self._connection,
                                                 templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict((i[0], i[1])
                                   for i in iteritems(self._task.args)
                                   if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until is not None:
            retries = self._task.retries
            if retries <= 0:
                retries = 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in range(retries):
            if attempt > 0:
                display.display(
                    "FAILED - RETRYING: %s (%d retries left). Result was: %s" %
                    (self._task, retries - attempt, result),
                    color="dark gray")
                result['attempts'] = attempt + 1

            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=str(e))
            display.debug("handler run complete")

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
                try:
                    result = json.loads(result.get('stdout'))
                except (TypeError, ValueError) as e:
                    return dict(
                        failed=True,
                        msg="The async task did not return valid JSON: %s" %
                        str(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result,
                                                     templar=templar)

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None:
                    cond = Conditional(loader=self._loader)
                    cond.when = [self._task.changed_when]
                    result['changed'] = cond.evaluate_conditional(
                        templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when is not None:
                    cond = Conditional(loader=self._loader)
                    cond.when = [self._task.failed_when]
                    failed_when_result = cond.evaluate_conditional(
                        templar, vars_copy)
                    result['failed_when_result'] = result[
                        'failed'] = failed_when_result
                    return failed_when_result
                return False

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # set the failed property if the result has a non-zero rc. This will be
            # overridden below if the failed_when property is set
            if result.get('rc', 0) != 0:
                result['failed'] = True

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if attempt < retries - 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break

                # no conditional check, or it failed, so sleep for the specified time
                time.sleep(delay)

            elif 'failed' not in result:
                break
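
# A minimal sketch of the omit-token filtering done in _execute() above
# (hypothetical token and args): any argument that resolved to the omit token
# is dropped before the module runs.
omit_token = '__omit_place_holder__abc123'
task_args = {'mode': '0644', 'owner': omit_token}
task_args = dict((k, v) for k, v in task_args.items() if v != omit_token)
# task_args == {'mode': '0644'}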
Example #23
    def _process_pending_results(self,
                                 iterator,
                                 one_pass=False,
                                 max_passes=None):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        def get_original_host(host_name):
            host_name = to_text(host_name)
            if host_name in self._inventory._hosts_cache:
                return self._inventory._hosts_cache[host_name]
            else:
                return self._inventory.get_host(host_name)

        def search_handler_blocks_by_name(handler_name, handler_blocks):
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    if handler_task.name:
                        handler_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            task=handler_task)
                        templar = Templar(loader=self._loader,
                                          variables=handler_vars)
                        try:
                            # first we check with the full result of get_name(), which may
                            # include the role name (if the handler is from a role). If that
                            # is not found, we resort to the simple name field, which doesn't
                            # have anything extra added to it.
                            target_handler_name = templar.template(
                                handler_task.name)
                            if target_handler_name == handler_name:
                                return handler_task
                            else:
                                target_handler_name = templar.template(
                                    handler_task.get_name())
                                if target_handler_name == handler_name:
                                    return handler_task
                        except (UndefinedError, AnsibleUndefinedVariable):
                            # We skip this handler due to the fact that it may be using
                            # a variable in the name that was conditionally included via
                            # set_fact or some other method, and we don't want to error
                            # out unnecessarily
                            continue
            return None

        def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    if handler_uuid == handler_task._uuid:
                        return handler_task
            return None

        def parent_handler_match(target_handler, handler_name):
            if target_handler:
                if isinstance(target_handler, (TaskInclude, IncludeRole)):
                    try:
                        handler_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            task=target_handler)
                        templar = Templar(loader=self._loader,
                                          variables=handler_vars)
                        target_handler_name = templar.template(
                            target_handler.name)
                        if target_handler_name == handler_name:
                            return True
                        else:
                            target_handler_name = templar.template(
                                target_handler.get_name())
                            if target_handler_name == handler_name:
                                return True
                    except (UndefinedError, AnsibleUndefinedVariable):
                        pass
                return parent_handler_match(target_handler._parent,
                                            handler_name)
            else:
                return False

        # a Templar class to use for templating things later, as we're using
        # original/non-validated objects here on the manager side. We set the
        # variables in use later inside the loop below
        templar = Templar(loader=self._loader)

        cur_pass = 0
        while True:
            try:
                self._results_lock.acquire()
                task_result = self._results.pop()
            except IndexError:
                break
            finally:
                self._results_lock.release()

            # get the original host and task.  We then assign them to the TaskResult for use in callbacks/etc.
            original_host = get_original_host(task_result._host)
            original_task = iterator.get_original_task(original_host,
                                                       task_result._task)

            task_result._host = original_host
            task_result._task = original_task

            # get the correct loop var for use later
            if original_task.loop_control:
                loop_var = original_task.loop_control.loop_var or 'item'
            else:
                loop_var = 'item'

            # get the vars for this task/host pair, make them the active set of vars for our templar above
            task_vars = self._variable_manager.get_vars(loader=self._loader,
                                                        play=iterator._play,
                                                        host=original_host,
                                                        task=original_task)
            self.add_tqm_variables(task_vars, play=iterator._play)
            templar.set_available_variables(task_vars)

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed',
                                            task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped',
                                            task_result)
                else:
                    if 'diff' in task_result._result:
                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff',
                                                    task_result)
                    self._tqm.send_callback('v2_runner_item_on_ok',
                                            task_result)
                continue

            run_once = templar.template(original_task.run_once)
            if original_task.register:
                host_list = self.get_task_hosts(iterator, original_host,
                                                original_task)

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(
                        target_host, {original_task.register: clean_copy})

            # all host status messages contain 2 entries: (msg, task_result)
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                ignore_errors = templar.template(original_task.ignore_errors)
                if not ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if run_once:
                        # if we're using run_once, we have to fail every host here
                        for h in self._inventory.get_hosts(
                                iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                state, _ = iterator.get_next_task_for_host(
                                    h, peek=True)
                                iterator.mark_host_failed(h)
                                state, new_task = iterator.get_next_task_for_host(
                                    h, peek=True)
                    else:
                        iterator.mark_host_failed(original_host)

                    # increment the failed count for this host
                    self._tqm._stats.increment('failures', original_host.name)

                    # grab the current state and if we're iterating on the rescue portion
                    # of a block then we save the failed task in a special var for use
                    # within the rescue/always
                    state, _ = iterator.get_next_task_for_host(original_host,
                                                               peek=True)

                    if iterator.is_failed(
                            original_host
                    ) and state and state.run_state == iterator.ITERATING_COMPLETE:
                        self._tqm._failed_hosts[original_host.name] = True

                    if state and state.run_state == iterator.ITERATING_RESCUE:
                        self._variable_manager.set_nonpersistent_facts(
                            original_host,
                            dict(
                                ansible_failed_task=original_task.serialize(),
                                ansible_failed_result=task_result._result,
                            ),
                        )
                else:
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result[
                            'changed']:
                        self._tqm._stats.increment('changed',
                                                   original_host.name)
                self._tqm.send_callback('v2_runner_on_failed',
                                        task_result,
                                        ignore_errors=ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                iterator._play._removed_hosts.append(original_host.name)
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable',
                                        task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                found = False
                                # Find the handler using the above helper.  First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                target_handler = search_handler_blocks_by_name(
                                    handler_name, iterator._play.handlers)
                                if target_handler is not None:
                                    found = True
                                    if original_host not in self._notified_handlers[
                                            target_handler._uuid]:
                                        self._notified_handlers[
                                            target_handler._uuid].append(
                                                original_host)
                                        # FIXME: should this be a callback?
                                        display.vv("NOTIFIED HANDLER %s" %
                                                   (handler_name, ))
                                else:
                                    # As there may be more than one handler with the notified name as the
                                    # parent, we just keep track of whether or not we found one at all
                                    for target_handler_uuid in self._notified_handlers:
                                        target_handler = search_handler_blocks_by_uuid(
                                            target_handler_uuid,
                                            iterator._play.handlers)
                                        if target_handler and parent_handler_match(
                                                target_handler, handler_name):
                                            self._notified_handlers[
                                                target_handler._uuid].append(
                                                    original_host)
                                            display.vv(
                                                "NOTIFIED HANDLER %s" %
                                                (target_handler.get_name(), ))
                                            found = True

                                if handler_name in self._listening_handlers:
                                    for listening_handler_uuid in self._listening_handlers[
                                            handler_name]:
                                        listening_handler = search_handler_blocks_by_uuid(
                                            listening_handler_uuid,
                                            iterator._play.handlers)
                                        if listening_handler is not None:
                                            found = True
                                        else:
                                            continue
                                        if original_host not in self._notified_handlers[
                                                listening_handler._uuid]:
                                            self._notified_handlers[
                                                listening_handler.
                                                _uuid].append(original_host)
                                            display.vv(
                                                "NOTIFIED HANDLER %s" %
                                                (listening_handler.get_name(),
                                                 ))

                                # and if none were found, then we raise an error
                                if not found:
                                    msg = "The requested handler '%s' was not found in either the main handlers list nor in the listening handlers list" % handler_name
                                    if C.ERROR_ON_MISSING_HANDLER:
                                        raise AnsibleError(msg)
                                    else:
                                        display.warning(msg)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)

                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)

                    if 'ansible_facts' in result_item:

                        # if delegated fact and we are delegating facts, we need to change target host for them
                        if original_task.delegate_to is not None and original_task.delegate_facts:
                            item = result_item.get(loop_var, None)
                            if item is not None:
                                task_vars[loop_var] = item
                            host_name = templar.template(
                                original_task.delegate_to)
                            actual_host = self._inventory.get_host(host_name)
                            if actual_host is None:
                                actual_host = Host(name=host_name)
                        else:
                            actual_host = original_host

                        host_list = self.get_task_hosts(
                            iterator, actual_host, original_task)
                        if original_task.action == 'include_vars':

                            for (var_name, var_value) in iteritems(
                                    result_item['ansible_facts']):
                                # find the host we're actually referring to here, which may
                                # be a host that is not really in inventory at all
                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(
                                        target_host, var_name, var_value)
                        else:
                            for target_host in host_list:
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(
                                        target_host,
                                        result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(
                                        target_host,
                                        result_item['ansible_facts'].copy())

                    if 'ansible_stats' in result_item and 'data' in result_item[
                            'ansible_stats'] and result_item['ansible_stats'][
                                'data']:

                        if 'per_host' not in result_item[
                                'ansible_stats'] or result_item[
                                    'ansible_stats']['per_host']:
                            host_list = self.get_task_hosts(
                                iterator, original_host, original_task)
                        else:
                            host_list = [None]

                        data = result_item['ansible_stats']['data']
                        aggregate = 'aggregate' in result_item[
                            'ansible_stats'] and result_item['ansible_stats'][
                                'aggregate']
                        for myhost in host_list:
                            for k in data.keys():
                                if aggregate:
                                    self._tqm._stats.update_custom_stats(
                                        k, data[k], myhost)
                                else:
                                    self._tqm._stats.set_custom_stats(
                                        k, data[k], myhost)

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if original_task.action not in ['include', 'include_role']:
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result[
                            'changed']:
                        self._tqm._stats.increment('changed',
                                                   original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran:  #TODO:  and original_task.action != 'include_role':?
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[
                        original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

            if one_pass or max_passes is not None and (cur_pass +
                                                       1) >= max_passes:
                break

            cur_pass += 1

        return ret_results
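
# A minimal sketch of the notified-handlers bookkeeping used above (hypothetical
# values): each handler UUID maps to the list of hosts that notified it, and a
# host is only appended once.
notified_handlers = {'handler-uuid-1': []}
original_host = 'web01'
if original_host not in notified_handlers['handler-uuid-1']:
    notified_handlers['handler-uuid-1'].append(original_host)
# notified_handlers == {'handler-uuid-1': ['web01']}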
Example #24
    def _get_magic_variables(self, loader, play, host, task, include_hostvars,
                             include_delegate_to):
        '''
        Returns a dictionary of so-called "magic" variables in Ansible,
        which are special variables we set internally for use.
        '''

        variables = dict()
        variables['playbook_dir'] = loader.get_basedir()

        if host:
            variables['group_names'] = sorted([
                group.name for group in host.get_groups()
                if group.name != 'all'
            ])

            if self._inventory:
                variables['groups'] = self._inventory.get_group_dict()

        if play:
            variables['role_names'] = [r._role_name for r in play.roles]

        if task:
            if task._role:
                variables['role_name'] = task._role.get_name()
                variables['role_path'] = task._role._role_path
                variables['role_uuid'] = text_type(task._role._uuid)

        if self._inventory is not None:
            variables['inventory_dir'] = self._inventory.basedir()
            variables['inventory_file'] = self._inventory.src()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                variables['ansible_play_hosts_all'] = [
                    x.name for x in self._inventory.get_hosts(
                        pattern=play.hosts or 'all', ignore_restrictions=True)
                ]
                variables['ansible_play_hosts'] = [
                    x for x in variables['ansible_play_hosts_all']
                    if x not in play._removed_hosts
                ]
                variables['ansible_play_batch'] = [
                    x.name for x in self._inventory.get_hosts()
                    if x.name not in play._removed_hosts
                ]

                #DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
                #  however this would take work in the templating engine, so for now we'll add both
                variables['play_hosts'] = variables['ansible_play_batch']

        # the 'omit' value allows params to be left out if the variable they are based on is undefined
        variables['omit'] = self._omit_token
        variables['ansible_version'] = CLI.version_info(gitinfo=False)
        # Set options vars
        for option, option_value in iteritems(self._options_vars):
            variables[option] = option_value

        if self._hostvars is not None and include_hostvars:
            variables['hostvars'] = self._hostvars

        return variables
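# Illustrative sketch (not part of the Ansible source): the 'omit' magic
# variable set above is a per-run placeholder token; any module parameter
# that still equals the token after templating is treated as "not supplied".
# A minimal standalone version of that idea, with hypothetical names:
import uuid

OMIT_TOKEN = '__omit_place_holder__%s' % uuid.uuid4().hex

def drop_omitted(params, omit_token=OMIT_TOKEN):
    # keep only the arguments whose value is not the omit placeholder
    return {k: v for k, v in params.items() if v != omit_token}

# drop_omitted({'path': '/tmp/x', 'mode': OMIT_TOKEN}) -> {'path': '/tmp/x'}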
Example #25
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # Get the connection user for permission checks
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity

        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__

        # give the module information about its name
        module_args['_ansible_module_name'] = module_name

        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)

        # let module know about filesystems that selinux treats specially
        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS

        (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp and \
         (module_style != 'new' or \
         not self._connection.has_pipelining or \
         not self._play_context.pipelining or \
         C.DEFAULT_KEEP_REMOTE_FILES or \
         self._play_context.become_method == 'su'):
            remote_module_filename = self._connection._shell.get_remote_filename(module_path)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
            if module_style in ('old', 'non_native_want_json', 'binary'):
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" % remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        remote_files = None

        if args_file_path:
            remote_files = tmp, remote_module_path, args_file_path
        elif remote_module_path:
            remote_files = tmp, remote_module_path

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        if remote_files:
            self._fixup_perms2(remote_files, remote_user)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd, sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning('Error deleting remote temporary files (rc: {0}, stderr: {1})'.format(tmp_rm_res.get('rc'),
                        tmp_rm_res.get('stderr', 'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
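# Illustrative sketch (not part of the Ansible source): for 'old' style
# modules the snippet above flattens the args into a "k=v k2=v2" string and
# writes it to a remote args file, shell-quoting each value. A standalone
# version of that serialization step (shlex.quote stands in for pipes.quote
# on Python 3):
try:
    from shlex import quote as shell_quote  # Python 3
except ImportError:
    from pipes import quote as shell_quote  # Python 2 fallback

def serialize_old_style_args(module_args):
    # produce the space-separated k=v string an old-style module would parse
    return ' '.join('%s=%s' % (k, shell_quote(str(v)))
                    for k, v in module_args.items())

# serialize_old_style_args({'path': '/tmp/a b', 'state': 'present'})
# -> "path='/tmp/a b' state=present"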
Example #26
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # Get the connection user for permission checks
        remote_user = task_vars.get(
            'ansible_ssh_user') or self._play_context.remote_user

        self._update_module_args(module_name, module_args, task_vars)

        (module_style, shebang, module_data,
         module_path) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" %
                               module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp and \
         (module_style != 'new' or \
         not self._connection.has_pipelining or \
         not self._play_context.pipelining or \
         C.DEFAULT_KEEP_REMOTE_FILES or \
         self._play_context.become_method == 'su'):
            remote_module_filename = self._connection._shell.get_remote_filename(
                module_path)
            remote_module_path = self._connection._shell.join_path(
                tmp, remote_module_filename)
            if module_style in ('old', 'non_native_want_json', 'binary'):
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" %
                          remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        remote_files = None

        if args_file_path:
            remote_files = tmp, remote_module_path, args_file_path
        elif remote_module_path:
            remote_files = tmp, remote_module_path

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        if remote_files:
            self._fixup_perms2(remote_files, remote_user)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(
            environment_string,
            shebang,
            cmd,
            arg_path=args_file_path,
            rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd,
                                              sudoable=sudoable,
                                              in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd,
                                                             sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning(
                        'Error deleting remote temporary files (rc: {0}, stderr: {1})'
                        .format(
                            tmp_rm_res.get('rc'),
                            tmp_rm_res.get('stderr',
                                           'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" %
                      (module_name, module_args))
        return data
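# Illustrative sketch (not part of the Ansible source): the long boolean near
# the top of both _execute_module variants decides whether the module file
# must be copied to remote tmp instead of being pipelined over stdin. As a
# hypothetical helper, the test reads roughly like this:
def needs_remote_module_copy(module_style, connection_supports_pipelining,
                             pipelining_enabled, keep_remote_files,
                             become_method):
    # anything that is not a pipelinable new-style module, or any setting
    # that disables pipelining, forces a copy of the module to remote tmp
    return (module_style != 'new'
            or not connection_supports_pipelining
            or not pipelining_enabled
            or keep_remote_files
            or become_method == 'su')

# needs_remote_module_copy('new', True, True, False, 'sudo') -> False
# needs_remote_module_copy('old', True, True, False, 'sudo') -> True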
Example #27
    def get_failed_hosts(self):
        return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
Example #28
    def preprocess_data(self, ds):
        '''
        tasks are especially complex arguments so need pre-processing.
        keep it short.
        '''

        assert isinstance(ds, dict)

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure suitable for the
        # attributes of the task class
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # use the args parsing class to determine the action, args,
        # and the delegate_to value from the various possible forms
        # supported as legacy
        args_parser = ModuleArgsParser(task_ds=ds)
        try:
            (action, args, delegate_to) = args_parser.parse()
        except AnsibleParserError as e:
            raise AnsibleParserError(to_native(e), obj=ds)

        # the command/shell/script modules used to support the `cmd` arg,
        # which corresponds to what we now call _raw_params, so move that
        # value over to _raw_params (assuming it is empty)
        if action in ('command', 'shell', 'script'):
            if 'cmd' in args:
                if args.get('_raw_params', '') != '':
                    raise AnsibleError(
                        "The 'cmd' argument cannot be used when other raw parameters are specified."
                        " Please put everything in one or the other place.",
                        obj=ds)
                args['_raw_params'] = args.pop('cmd')

        new_ds['action'] = action
        new_ds['args'] = args
        new_ds['delegate_to'] = delegate_to

        # we handle any 'vars' specified in the ds here, as we may
        # be adding things to them below (special handling for includes).
        # When that deprecated feature is removed, this can be too.
        if 'vars' in ds:
            # _load_vars is defined in Base, and is used to load a dictionary
            # or list of dictionaries in a standard way
            new_ds['vars'] = self._load_vars(None, ds.get('vars'))
        else:
            new_ds['vars'] = dict()

        for (k, v) in iteritems(ds):
            if k in ('action', 'local_action', 'args',
                     'delegate_to') or k == action or k == 'shell':
                # we don't want to re-assign these values, which were
                # determined by the ModuleArgsParser() above
                continue
            elif k.replace("with_", "") in lookup_loader:
                self._preprocess_loop(ds, new_ds, k, v)
            else:
                # pre-2.0 syntax allowed variables for include statements at the
                # top level of the task, so we move those into the 'vars' dictionary
                # here, and show a deprecation message as we will remove this at
                # some point in the future.
                if action == 'include' and k not in self._valid_attrs and k not in self.DEPRECATED_ATTRIBUTES:
                    display.deprecated(
                        "Specifying include variables at the top-level of the task is deprecated."
                        " Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\n"
                        " for currently supported syntax regarding included files and variables"
                    )
                    new_ds['vars'][k] = v
                else:
                    new_ds[k] = v

        return super(Task, self).preprocess_data(new_ds)
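# Illustrative sketch (not part of the Ansible source): the 'cmd' handling
# above folds the legacy argument into '_raw_params' only when that slot is
# still empty. The same normalization on a plain dict of parsed args:
def normalize_cmd_arg(action, args):
    # only the raw command-style actions ever accepted 'cmd'
    if action in ('command', 'shell', 'script') and 'cmd' in args:
        if args.get('_raw_params', '') != '':
            raise ValueError("'cmd' cannot be combined with other raw parameters")
        args['_raw_params'] = args.pop('cmd')
    return args

# normalize_cmd_arg('command', {'cmd': 'uptime'}) -> {'_raw_params': 'uptime'}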
Example #29
    def _process_pending_results(self, iterator, one_pass=False):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        def get_original_host(host_name):
            host_name = to_unicode(host_name)
            if host_name in self._inventory._hosts_cache:
                return self._inventory._hosts_cache[host_name]
            else:
                return self._inventory.get_host(host_name)

        def search_handler_blocks(handler_name, handler_blocks):
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    handler_vars = self._variable_manager.get_vars(
                        loader=self._loader,
                        play=iterator._play,
                        task=handler_task)
                    templar = Templar(loader=self._loader,
                                      variables=handler_vars)
                    try:
                        # first we check with the full result of get_name(), which may
                        # include the role name (if the handler is from a role). If that
                        # is not found, we resort to the simple name field, which doesn't
                        # have anything extra added to it.
                        target_handler_name = templar.template(
                            handler_task.name)
                        if target_handler_name == handler_name:
                            return handler_task
                        else:
                            target_handler_name = templar.template(
                                handler_task.get_name())
                            if target_handler_name == handler_name:
                                return handler_task
                    except (UndefinedError, AnsibleUndefinedVariable) as e:
                        # We skip this handler due to the fact that it may be using
                        # a variable in the name that was conditionally included via
                        # set_fact or some other method, and we don't want to error
                        # out unnecessarily
                        continue
            return None

        def parent_handler_match(target_handler, handler_name):
            if target_handler:
                if isinstance(target_handler, (TaskInclude, IncludeRole)):
                    try:
                        handler_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            task=target_handler)
                        templar = Templar(loader=self._loader,
                                          variables=handler_vars)
                        target_handler_name = templar.template(
                            target_handler.name)
                        if target_handler_name == handler_name:
                            return True
                        else:
                            target_handler_name = templar.template(
                                target_handler.get_name())
                            if target_handler_name == handler_name:
                                return True
                    except (UndefinedError, AnsibleUndefinedVariable) as e:
                        pass
                return parent_handler_match(target_handler._parent,
                                            handler_name)
            else:
                return False

        passes = 0
        while not self._tqm._terminated:
            try:
                task_result = self._final_q.get(timeout=0.001)
                original_host = get_original_host(task_result._host)
                original_task = iterator.get_original_task(
                    original_host, task_result._task)
                task_result._host = original_host
                task_result._task = original_task

                # send callbacks for 'non final' results
                if '_ansible_retry' in task_result._result:
                    self._tqm.send_callback('v2_runner_retry', task_result)
                    continue
                elif '_ansible_item_result' in task_result._result:
                    if task_result.is_failed() or task_result.is_unreachable():
                        self._tqm.send_callback('v2_runner_item_on_failed',
                                                task_result)
                    elif task_result.is_skipped():
                        self._tqm.send_callback('v2_runner_item_on_skipped',
                                                task_result)
                    else:
                        self._tqm.send_callback('v2_runner_item_on_ok',
                                                task_result)
                    continue

                if original_task.register:
                    if original_task.run_once:
                        host_list = [
                            host for host in self._inventory.get_hosts(
                                iterator._play.hosts)
                            if host.name not in self._tqm._unreachable_hosts
                        ]
                    else:
                        host_list = [original_host]

                    clean_copy = strip_internal_keys(task_result._result)
                    if 'invocation' in clean_copy:
                        del clean_copy['invocation']

                    for target_host in host_list:
                        self._variable_manager.set_nonpersistent_facts(
                            target_host, {original_task.register: clean_copy})

                # all host status messages contain 2 entries: (msg, task_result)
                role_ran = False
                if task_result.is_failed():
                    role_ran = True
                    if not original_task.ignore_errors:
                        display.debug("marking %s as failed" %
                                      original_host.name)
                        if original_task.run_once:
                            # if we're using run_once, we have to fail every host here
                            for h in self._inventory.get_hosts(
                                    iterator._play.hosts):
                                if h.name not in self._tqm._unreachable_hosts:
                                    state, _ = iterator.get_next_task_for_host(
                                        h, peek=True)
                                    iterator.mark_host_failed(h)
                                    state, new_task = iterator.get_next_task_for_host(
                                        h, peek=True)
                        else:
                            iterator.mark_host_failed(original_host)

                        # only add the host to the failed list officially if it has
                        # been failed by the iterator
                        if iterator.is_failed(original_host):
                            self._tqm._failed_hosts[original_host.name] = True
                            self._tqm._stats.increment('failures',
                                                       original_host.name)
                        else:
                            # otherwise, we grab the current state and if we're iterating on
                            # the rescue portion of a block then we save the failed task in a
                            # special var for use within the rescue/always
                            state, _ = iterator.get_next_task_for_host(
                                original_host, peek=True)
                            if state.run_state == iterator.ITERATING_RESCUE:
                                self._variable_manager.set_nonpersistent_facts(
                                    original_host,
                                    dict(
                                        ansible_failed_task=original_task.
                                        serialize(),
                                        ansible_failed_result=task_result.
                                        _result,
                                    ),
                                )
                    else:
                        self._tqm._stats.increment('ok', original_host.name)
                    self._tqm.send_callback(
                        'v2_runner_on_failed',
                        task_result,
                        ignore_errors=original_task.ignore_errors)
                elif task_result.is_unreachable():
                    self._tqm._unreachable_hosts[original_host.name] = True
                    self._tqm._stats.increment('dark', original_host.name)
                    self._tqm.send_callback('v2_runner_on_unreachable',
                                            task_result)
                elif task_result.is_skipped():
                    self._tqm._stats.increment('skipped', original_host.name)
                    self._tqm.send_callback('v2_runner_on_skipped',
                                            task_result)
                else:
                    role_ran = True

                    if original_task.loop:
                        # this task had a loop, and has more than one result, so
                        # loop over all of them instead of a single result
                        result_items = task_result._result.get('results', [])
                    else:
                        result_items = [task_result._result]

                    for result_item in result_items:
                        if '_ansible_notify' in result_item:
                            if task_result.is_changed():
                                # The shared dictionary for notified handlers is a proxy, which
                                # does not detect when sub-objects within the proxy are modified.
                                # So, per the docs, we reassign the list so the proxy picks up and
                                # notifies all other threads
                                for handler_name in result_item[
                                        '_ansible_notify']:
                                    # Find the handler using the above helper.  First we look up the
                                    # dependency chain of the current task (if it's from a role), otherwise
                                    # we just look through the list of handlers in the current play/all
                                    # roles and use the first one that matches the notify name
                                    if handler_name in self._listening_handlers:
                                        for listening_handler_name in self._listening_handlers[
                                                handler_name]:
                                            listening_handler = search_handler_blocks(
                                                listening_handler_name,
                                                iterator._play.handlers)
                                            if listening_handler is None:
                                                raise AnsibleError(
                                                    "The requested handler listener '%s' was not found in any of the known handlers"
                                                    % listening_handler_name)
                                            if original_host not in self._notified_handlers[
                                                    listening_handler]:
                                                self._notified_handlers[
                                                    listening_handler].append(
                                                        original_host)
                                                display.vv(
                                                    "NOTIFIED HANDLER %s" %
                                                    (listening_handler_name, ))

                                    else:
                                        target_handler = search_handler_blocks(
                                            handler_name,
                                            iterator._play.handlers)
                                        if target_handler is not None:
                                            if original_host not in self._notified_handlers[
                                                    target_handler]:
                                                self._notified_handlers[
                                                    target_handler].append(
                                                        original_host)
                                                # FIXME: should this be a callback?
                                                display.vv(
                                                    "NOTIFIED HANDLER %s" %
                                                    (handler_name, ))
                                        else:
                                            # There may be more than one handler with the notified name as the
                                            # parent, so we just keep track of whether or not we found one at all
                                            found = False
                                            for target_handler in self._notified_handlers:
                                                if parent_handler_match(
                                                        target_handler,
                                                        handler_name):
                                                    self._notified_handlers[
                                                        target_handler].append(
                                                            original_host)
                                                    display.vv(
                                                        "NOTIFIED HANDLER %s" %
                                                        (target_handler.
                                                         get_name(), ))
                                                    found = True

                                            # and if none were found, then we raise an error
                                            if not found:
                                                raise AnsibleError(
                                                    "The requested handler '%s' was found in neither the main handlers list nor the listening handlers list"
                                                    % handler_name)

                        if 'add_host' in result_item:
                            # this task added a new host (add_host module)
                            new_host_info = result_item.get('add_host', dict())
                            self._add_host(new_host_info, iterator)

                        elif 'add_group' in result_item:
                            # this task added a new group (group_by module)
                            self._add_group(original_host, result_item)

                        elif 'ansible_facts' in result_item:
                            loop_var = 'item'
                            if original_task.loop_control:
                                loop_var = original_task.loop_control.loop_var or 'item'

                            item = result_item.get(loop_var, None)

                            if original_task.action == 'include_vars':
                                for (var_name, var_value) in iteritems(
                                        result_item['ansible_facts']):
                                    # find the host we're actually referring to here, which may
                                    # be a host that is not really in inventory at all
                                    if original_task.delegate_to is not None and original_task.delegate_facts:
                                        task_vars = self._variable_manager.get_vars(
                                            loader=self._loader,
                                            play=iterator._play,
                                            host=original_host,
                                            task=original_task)
                                        self.add_tqm_variables(
                                            task_vars, play=iterator._play)
                                        if item is not None:
                                            task_vars[loop_var] = item
                                        templar = Templar(loader=self._loader,
                                                          variables=task_vars)
                                        host_name = templar.template(
                                            original_task.delegate_to)
                                        actual_host = self._inventory.get_host(
                                            host_name)
                                        if actual_host is None:
                                            actual_host = Host(name=host_name)
                                    else:
                                        actual_host = original_host

                                    if original_task.run_once:
                                        host_list = [
                                            host for host in self._inventory.
                                            get_hosts(iterator._play.hosts)
                                            if host.name not in
                                            self._tqm._unreachable_hosts
                                        ]
                                    else:
                                        host_list = [actual_host]

                                    for target_host in host_list:
                                        self._variable_manager.set_host_variable(
                                            target_host, var_name, var_value)
                            else:
                                if original_task.run_once:
                                    host_list = [
                                        host
                                        for host in self._inventory.get_hosts(
                                            iterator._play.hosts) if host.name
                                        not in self._tqm._unreachable_hosts
                                    ]
                                else:
                                    host_list = [original_host]

                                for target_host in host_list:
                                    if original_task.action == 'set_fact':
                                        self._variable_manager.set_nonpersistent_facts(
                                            target_host,
                                            result_item['ansible_facts'].copy(
                                            ))
                                    else:
                                        self._variable_manager.set_host_facts(
                                            target_host,
                                            result_item['ansible_facts'].copy(
                                            ))

                    if 'diff' in task_result._result:
                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff',
                                                    task_result)

                    if original_task.action != 'include':
                        self._tqm._stats.increment('ok', original_host.name)
                        if 'changed' in task_result._result and task_result._result[
                                'changed']:
                            self._tqm._stats.increment('changed',
                                                       original_host.name)

                    # finally, send the ok for this task
                    self._tqm.send_callback('v2_runner_on_ok', task_result)

                self._pending_results -= 1
                if original_host.name in self._blocked_hosts:
                    del self._blocked_hosts[original_host.name]

                # If this is a role task, mark the parent role as being run (if
                # the task was ok or failed, but not skipped or unreachable)
                if original_task._role is not None and role_ran and original_task.action != 'include_role':
                    # lookup the role in the ROLE_CACHE to make sure we're dealing
                    # with the correct object and mark it as executed
                    for (entry,
                         role_obj) in iteritems(iterator._play.ROLE_CACHE[
                             original_task._role._role_name]):
                        if role_obj._uuid == original_task._role._uuid:
                            role_obj._had_task_run[original_host.name] = True

                ret_results.append(task_result)

            except Queue.Empty:
                passes += 1
                if passes > 2:
                    break

            if one_pass:
                break

        return ret_results
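# Illustrative sketch (not part of the Ansible source): the loop above polls
# the final results queue with a short timeout and gives up only after a few
# empty reads. The same drain pattern in isolation:
import queue  # the stdlib queue module ('Queue' on Python 2)

def drain_results(results_q, max_empty_passes=3, timeout=0.001):
    # pull results until the queue has come back empty max_empty_passes times
    items, empty_passes = [], 0
    while empty_passes < max_empty_passes:
        try:
            items.append(results_q.get(timeout=timeout))
        except queue.Empty:
            empty_passes += 1
    return items

# q = queue.Queue(); q.put('r1'); drain_results(q) -> ['r1'] after ~3ms of polling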
Example #30
    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables.  Run basic types (from isa) as well as
        any _post_validate_<foo> functions.
        '''

        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')

        for (name, attribute) in iteritems(self._valid_attrs):

            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError("the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue

            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                elif attribute.isa == 'class':
                    value = getattr(self, name)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))

                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    setattr(self, name, attribute.default)
                    continue

                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = to_text(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa in ('list', 'barelist'):
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            if isinstance(value, string_types) and attribute.isa == 'barelist':
                                display.deprecated(
                                    "Using comma separated values for a list has been deprecated. "
                                    "You should instead use the correct YAML syntax for lists. "
                                )
                                value = value.split(',')
                            else:
                                value = [ value ]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError("the field '%s' should be a list of %s,"
                                            " but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        elif not isinstance(value, (list, set)):
                            if isinstance(value, string_types):
                                value = value.split(',')
                            else:
                                # Making a list like this handles strings of
                                # text and bytes properly
                                value = [ value ]
                        if not isinstance(value, set):
                            value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)
                    elif attribute.isa == 'class':
                        if not isinstance(value, attribute.class_type):
                            raise TypeError("%s is not a valid %s (got a %s instead)" % (name, attribute.class_type, type(value)))
                        value.post_validate(templar=templar)

                # and assign the massaged value back to the attribute field
                setattr(self, name, value)

            except (TypeError, ValueError) as e:
                raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
                        " Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined."
                            " The error was: %s" % (name,e), obj=self.get_ds())

        self._finalized = True
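# Illustrative sketch (not part of the Ansible source): the 'percent' branch
# above accepts either a bare number or a string with a trailing '%'. The
# same coercion on its own, under a hypothetical name:
def to_percent(value):
    # "30%" -> 30.0, "0.5" -> 0.5, 30 -> 30.0
    if isinstance(value, str) and '%' in value:
        value = value.replace('%', '')
    return float(value)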
Example #31
    def run(self):

        super(DocCLI, self).run()

        if self.options.module_path is not None:
            for i in self.options.module_path.split(os.pathsep):
                module_loader.add_directory(i)

        # list modules
        if self.options.list_dir:
            paths = module_loader._get_paths()
            for path in paths:
                self.find_modules(path)

            self.pager(self.get_module_list_text())
            return 0

        # process all modules
        if self.options.all_modules:
            paths = module_loader._get_paths()
            for path in paths:
                self.find_modules(path)
            self.args = sorted(
                set(self.module_list) - module_docs.BLACKLIST_MODULES)

        if len(self.args) == 0:
            raise AnsibleOptionsError("Incorrect options passed")

        # process command line module list
        text = ''
        for module in self.args:

            try:
                # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
                filename = module_loader.find_plugin(module, mod_type='.py')
                if filename is None:
                    display.warning(
                        "module %s not found in %s\n" %
                        (module, DocCLI.print_paths(module_loader)))
                    continue

                if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
                    continue

                try:
                    doc, plainexamples, returndocs, metadata = module_docs.get_docstring(
                        filename, verbose=(self.options.verbosity > 0))
                except:
                    display.vvv(traceback.format_exc())
                    display.error(
                        "module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv"
                        % module)
                    continue

                if doc is not None:

                    # is there corresponding action plugin?
                    if module in action_loader:
                        doc['action'] = True
                    else:
                        doc['action'] = False

                    all_keys = []
                    for (k, v) in iteritems(doc['options']):
                        all_keys.append(k)
                    all_keys = sorted(all_keys)
                    doc['option_keys'] = all_keys

                    doc['filename'] = filename
                    doc['docuri'] = doc['module'].replace('_', '-')
                    doc['now_date'] = datetime.date.today().strftime(
                        '%Y-%m-%d')
                    doc['plainexamples'] = plainexamples
                    doc['returndocs'] = returndocs
                    doc['metadata'] = metadata

                    if self.options.show_snippet:
                        text += self.get_snippet_text(doc)
                    else:
                        text += self.get_man_text(doc)
                else:
                    # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
                    # probably a quoting issue.
                    raise AnsibleError("Parsing produced an empty object.")
            except Exception as e:
                display.vvv(traceback.format_exc())
                raise AnsibleError(
                    "module %s missing documentation (or could not parse documentation): %s\n"
                    % (module, str(e)))

        if text:
            self.pager(text)
        return 0
Example #32
    def _parse(self, err):

        all_hosts = {}

        # not passing from_remote because data from CMDB is trusted
        try:
            self.raw = self._loader.load(self.data)
        except Exception as e:
            sys.stderr.write(err + "\n")
            raise AnsibleError(
                "failed to parse executable inventory script results from {0}: {1}"
                .format(to_native(self.filename), to_native(e)))

        if not isinstance(self.raw, Mapping):
            sys.stderr.write(err + "\n")
            raise AnsibleError(
                "failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict"
                .format(to_native(self.filename)))

        group = None
        for (group_name, data) in self.raw.items():

            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host.  This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.

            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue

            if group_name not in self.groups:
                group = self.groups[group_name] = Group(group_name)

            group = self.groups[group_name]
            host = None

            if not isinstance(data, dict):
                data = {'hosts': data}
            # if it has none of those subkeys, use the simplified syntax: a host entry with vars
            elif not any(k in data for k in ('hosts', 'vars', 'children')):
                data = {'hosts': [group_name], 'vars': data}

            if 'hosts' in data:
                if not isinstance(data['hosts'], list):
                    raise AnsibleError("You defined a group \"%s\" with bad "
                                       "data for the host list:\n %s" %
                                       (group_name, data))

                for hostname in data['hosts']:
                    if hostname not in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)

            if 'vars' in data:
                if not isinstance(data['vars'], dict):
                    raise AnsibleError("You defined a group \"%s\" with bad "
                                       "data for variables:\n %s" %
                                       (group_name, data))

                for k, v in iteritems(data['vars']):
                    group.set_variable(k, v)

        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in self.groups:
                        self.groups[group_name].add_child_group(
                            self.groups[child_name])

        # Finally, add all top-level groups as children of 'all'.
        # We exclude ungrouped here because it was already added as a child of
        # 'all' at the time it was created.

        for group in self.groups.values():
            if group.depth == 0 and group.name not in ('all', 'ungrouped'):
                self.groups['all'].add_child_group(group)
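# Illustrative sketch (not part of the Ansible source): a dynamic inventory
# script returns one JSON object per group; '_meta.hostvars' short-circuits
# the per-host '--host' calls, a bare list is just the host list, and a dict
# without the known subkeys is the simplified "group vars" form. A minimal
# normalizer for those shapes, using plain dicts:
def normalize_group_data(group_name, data):
    if not isinstance(data, dict):
        # bare list: just the hosts of the group
        data = {'hosts': data}
    elif not any(k in data for k in ('hosts', 'vars', 'children')):
        # no known subkeys: treat the dict as vars for a single implicit host
        data = {'hosts': [group_name], 'vars': data}
    return data

# normalize_group_data('web', ['web1', 'web2']) -> {'hosts': ['web1', 'web2']}
# normalize_group_data('web', {'ansible_port': 2222})
# -> {'hosts': ['web'], 'vars': {'ansible_port': 2222}}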
Example #33
    def set_task_and_variable_override(self, task, variables, templar):
        '''
        Sets attributes from the task if they are set, which will override
        those from the play.
        '''

        new_info = self.copy()

        # loop through a subset of attributes on the task object and set
        # connection fields based on their values
        for attr in TASK_ATTRIBUTE_OVERRIDES:
            if hasattr(task, attr):
                attr_val = getattr(task, attr)
                if attr_val is not None:
                    setattr(new_info, attr, attr_val)

        # next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
        # connection info object with 'magic' variables from the variable list.
        # If the value 'ansible_delegated_vars' is in the variables, it means
        # we have a delegated-to host, so we check there first before looking
        # at the variables in general
        if task.delegate_to is not None:
            # In the case of a loop, the delegated_to host may have been
            # templated based on the loop variable, so we try and locate
            # the host name in the delegated variable dictionary here
            delegated_host_name = templar.template(task.delegate_to)
            delegated_vars = variables.get('ansible_delegated_vars',
                                           dict()).get(delegated_host_name,
                                                       dict())

            delegated_transport = C.DEFAULT_TRANSPORT
            for transport_var in MAGIC_VARIABLE_MAPPING.get('connection'):
                if transport_var in delegated_vars:
                    delegated_transport = delegated_vars[transport_var]
                    break

            # make sure this delegated_to host has something set for its remote
            # address, otherwise we default to connecting to it by name. This
            # may happen when users put an IP entry into their inventory, or if
            # they rely on DNS for a non-inventory hostname
            for address_var in MAGIC_VARIABLE_MAPPING.get('remote_addr'):
                if address_var in delegated_vars:
                    break
            else:
                display.debug(
                    "no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution"
                    % delegated_host_name)
                delegated_vars['ansible_host'] = delegated_host_name

            # reset the port back to the default if none was specified, to prevent
            # the delegated host from inheriting the original host's setting
            for port_var in MAGIC_VARIABLE_MAPPING.get('port'):
                if port_var in delegated_vars:
                    break
            else:
                if delegated_transport == 'winrm':
                    delegated_vars['ansible_port'] = 5986
                else:
                    delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT

            # and likewise for the remote user
            for user_var in MAGIC_VARIABLE_MAPPING.get('remote_user'):
                if user_var in delegated_vars and delegated_vars[user_var]:
                    break
            else:
                delegated_vars[
                    'ansible_user'] = task.remote_user or self.remote_user
        else:
            delegated_vars = dict()

            # setup shell
            for exe_var in MAGIC_VARIABLE_MAPPING.get('executable'):
                if exe_var in variables:
                    setattr(new_info, 'executable', variables.get(exe_var))

        attrs_considered = []
        for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
            for variable_name in variable_names:
                if attr in attrs_considered:
                    continue
                # for a delegated task, use ONLY the delegated host's vars and
                # never fall back to the vars of the host doing the delegating
                if task.delegate_to is not None:
                    if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
                        setattr(new_info, attr, delegated_vars[variable_name])
                        attrs_considered.append(attr)
                elif variable_name in variables:
                    setattr(new_info, attr, variables[variable_name])
                    attrs_considered.append(attr)
                # no else, as no other vars should be considered

        # become legacy updates -- from commandline
        if not new_info.become_pass:
            if new_info.become_method == 'sudo' and new_info.sudo_pass:
                setattr(new_info, 'become_pass', new_info.sudo_pass)
            elif new_info.become_method == 'su' and new_info.su_pass:
                setattr(new_info, 'become_pass', new_info.su_pass)

        # become legacy updates -- from inventory file (inventory overrides
        # commandline)
        for become_pass_name in MAGIC_VARIABLE_MAPPING.get('become_pass'):
            if become_pass_name in variables:
                break
        else:  # This is a for-else
            if new_info.become_method == 'sudo':
                for sudo_pass_name in MAGIC_VARIABLE_MAPPING.get('sudo_pass'):
                    if sudo_pass_name in variables:
                        setattr(new_info, 'become_pass',
                                variables[sudo_pass_name])
                        break
            elif new_info.become_method == 'su':
                for su_pass_name in MAGIC_VARIABLE_MAPPING.get('su_pass'):
                    if su_pass_name in variables:
                        setattr(new_info, 'become_pass',
                                variables[su_pass_name])
                        break

        # make sure we get port defaults if needed
        if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
            new_info.port = int(C.DEFAULT_REMOTE_PORT)

        # special overrides for the connection setting
        if len(delegated_vars) > 0:
            # in the event that we were using local before make sure to reset the
            # connection type to the default transport for the delegated-to host,
            # if not otherwise specified
            for connection_type in MAGIC_VARIABLE_MAPPING.get('connection'):
                if connection_type in delegated_vars:
                    break
            else:
                remote_addr_local = new_info.remote_addr in C.LOCALHOST
                inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
                if remote_addr_local and inv_hostname_local:
                    setattr(new_info, 'connection', 'local')
                elif (getattr(new_info, 'connection', None) == 'local'
                        and (not remote_addr_local or not inv_hostname_local)):
                    setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)

        # if the final connection type is local, reset the remote_user value
        # to that of the currently logged in user, to ensure any become settings
        # are obeyed correctly
        # additionally, we need to do this check after final connection has been
        # correctly set above ...
        if new_info.connection == 'local':
            new_info.remote_user = pwd.getpwuid(os.getuid()).pw_name

        # set no_log to default if it was not previously set
        if new_info.no_log is None:
            new_info.no_log = C.DEFAULT_NO_LOG

        # set become defaults if not previously set
        task.set_become_defaults(new_info.become, new_info.become_method,
                                 new_info.become_user)

        if task.always_run:
            display.deprecated(
                "always_run is deprecated. Use check_mode = no instead.",
                version="2.4",
                removed=False)
            new_info.check_mode = False

        # check_mode replaces always_run, overwrite always_run if both are given
        if task.check_mode is not None:
            new_info.check_mode = task.check_mode

        return new_info
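The become-password fallback in this example leans on Python's for/else construct (called out by the '# This is a for-else' comment): the else suite runs only when the loop finished without hitting break. A minimal, self-contained sketch of that control flow, with made-up variable names that are not part of the Ansible API:

# for/else: the else clause runs only if the loop never hit `break`.
def first_match(candidate_names, available):
    found = None
    for name in candidate_names:
        if name in available:
            found = available[name]
            break
    else:
        # no candidate matched, fall back to a default
        found = 'DEFAULT'
    return found

print(first_match(['become_pass', 'sudo_pass'], {'sudo_pass': 'secret'}))  # -> 'secret'
print(first_match(['become_pass'], {}))                                    # -> 'DEFAULT'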
Example #34
    def get_failed_hosts(self):
        return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
Example #35
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True,
                        wrap_async=False):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        remote_module_path = None
        args_file_path = None
        remote_files = []

        # if a module name was not specified for this execution, use the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        self._update_module_args(module_name, module_args, task_vars)

        # FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
        (module_style, shebang, module_data,
         module_path) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" %
                               module_name)

        if not self._is_pipelining_enabled(module_style, wrap_async):

            # we might need remote tmp dir
            if not tmp or 'tmp' not in tmp:
                tmp = self._make_tmp_path()

            remote_module_filename = self._connection._shell.get_remote_filename(
                module_path)
            remote_module_path = self._connection._shell.join_path(
                tmp, remote_module_filename)

        if module_style in ('old', 'non_native_want_json', 'binary'):
            # we'll also need a temp file to hold our module arguments
            args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" %
                          remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and remote_module_path:
            remote_files = [tmp, remote_module_path]

        if args_file_path:
            remote_files.append(args_file_path)

        sudoable = True
        in_data = None
        cmd = ""

        if wrap_async:
            # configure, upload, and chmod the async_wrapper module
            (async_module_style, shebang, async_module_data,
             async_module_path) = self._configure_module(
                 module_name='async_wrapper',
                 module_args=dict(),
                 task_vars=task_vars)
            async_module_remote_filename = self._connection._shell.get_remote_filename(
                async_module_path)
            remote_async_module_path = self._connection._shell.join_path(
                tmp, async_module_remote_filename)
            self._transfer_data(remote_async_module_path, async_module_data)
            remote_files.append(remote_async_module_path)

            async_limit = self._task.async
Example #36
    def get_failed_hosts(self):
        return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
Example #37
    def get_failed_hosts(self):
        return dict((host, True)
                    for (host, state) in iteritems(self._host_states)
                    if state.run_state == self.ITERATING_COMPLETE
                    and state.fail_state != self.FAILED_NONE)
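Both variants of get_failed_hosts are a single dict-building generator expression over the iterator's per-host state. A toy reproduction with stand-in state objects shows the shape of the filter; the constants and namedtuple below are hypothetical, the real values live on PlayIterator:

from collections import namedtuple

ITERATING_COMPLETE = 4   # hypothetical stand-ins for PlayIterator's constants
FAILED_NONE = 0
HostState = namedtuple('HostState', ['run_state', 'fail_state'])

host_states = {
    'web1': HostState(ITERATING_COMPLETE, 2),             # finished, and failed
    'web2': HostState(ITERATING_COMPLETE, FAILED_NONE),   # finished cleanly
    'db1':  HostState(1, 2),                              # still iterating
}

failed = dict((host, True) for (host, state) in host_states.items()
              if state.run_state == ITERATING_COMPLETE
              and state.fail_state != FAILED_NONE)
print(failed)   # {'web1': True}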
Example #38
    def run(self, tmp=None, task_vars=None):
        ''' transfer the given module name, plus the async module, then run it '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = 'check mode not supported for this module'
            return result

        remote_user = task_vars.get(
            'ansible_ssh_user') or self._play_context.remote_user
        if not tmp:
            tmp = self._make_tmp_path(remote_user)
            self._cleanup_remote_tmp = True

        module_name = self._task.action
        async_module_path = self._connection._shell.join_path(
            tmp, 'async_wrapper')
        remote_module_path = self._connection._shell.join_path(
            tmp, module_name)

        env_string = self._compute_environment_string()

        module_args = self._task.args.copy()
        if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG:
            module_args['_ansible_no_log'] = True

        # configure, upload, and chmod the target module
        (module_style, shebang, module_data,
         module_path) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        if module_style == 'binary':
            self._transfer_file(module_path, remote_module_path)
        else:
            self._transfer_data(remote_module_path, module_data)

        # configure, upload, and chmod the async_wrapper module
        (async_module_style, shebang, async_module_data,
         _) = self._configure_module(module_name='async_wrapper',
                                     module_args=dict(),
                                     task_vars=task_vars)
        self._transfer_data(async_module_path, async_module_data)

        argsfile = None
        if module_style in ('non_native_want_json', 'binary'):
            argsfile = self._transfer_data(
                self._connection._shell.join_path(tmp, 'arguments'),
                json.dumps(module_args))
        elif module_style == 'old':
            args_data = ""
            for k, v in iteritems(module_args):
                args_data += '%s="%s" ' % (k, pipes.quote(to_unicode(v)))
            argsfile = self._transfer_data(
                self._connection._shell.join_path(tmp, 'arguments'), args_data)

        remote_paths = (tmp, remote_module_path, async_module_path)

        # argsfile doesn't need to be executable, but this saves an extra call to the remote host
        if argsfile:
            remote_paths += (argsfile,)

        self._fixup_perms(remote_paths, remote_user, execute=True)

        async_limit = self._task.async
Example #39
    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host,
                                                                    peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks = 0
        num_rescue = 0
        num_always = 0

        lowest_cur_block = len(iterator._blocks)

        display.debug("counting tasks in each state of execution")
        for (k, v) in iteritems(host_tasks):
            if v is None:
                continue

            (s, t) = v
            if t is None:
                continue

            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution")

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks[host.name]
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    # advance the host's state past the peeked task (peek=False this time)
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block,
                                           PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block,
                                           PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block,
                                           PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block,
                                           PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]
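Stripped of the PlayIterator details, the lockstep rule is: find the "earliest" run state any host is currently in, hand those hosts their real next task, and hand every other host a noop so the whole batch stays aligned. A reduced sketch of that selection step, using plain strings for states and tasks (nothing here is the real strategy API):

# Hypothetical, simplified lockstep selection.
STATE_ORDER = ['setup', 'tasks', 'rescue', 'always', 'complete']

def lockstep(host_tasks):
    """host_tasks maps host name -> (state, task) or None."""
    live = {h: st for h, st in host_tasks.items() if st and st[1] is not None}
    if not live:
        return [(h, None) for h in host_tasks]   # everyone is done
    # earliest state any host is currently in
    current = min((state for state, _ in live.values()), key=STATE_ORDER.index)
    result = []
    for h, st in host_tasks.items():
        if not st or st[1] is None:
            continue                              # mirrors the skip in the code above
        state, task = st
        result.append((h, task if state == current else 'noop'))
    return result

print(lockstep({'a': ('tasks', 'install pkg'), 'b': ('rescue', 'cleanup'), 'c': None}))
# [('a', 'install pkg'), ('b', 'noop')]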
Example #40
    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks  = 0
        num_rescue = 0
        num_always = 0

        display.debug("counting tasks in each state of execution")
        host_tasks_to_run = [(host, state_task)
                             for host, state_task in iteritems(host_tasks)
                             if state_task and state_task[1]]

        if host_tasks_to_run:
            try:
                lowest_cur_block = min(
                    (s.cur_block for h, (s, t) in host_tasks_to_run
                    if s.run_state != PlayIterator.ITERATING_COMPLETE))
            except ValueError:
                lowest_cur_block = None
        else:
            # empty host_tasks_to_run will just run till the end of the function
            # without ever touching lowest_cur_block
            lowest_cur_block = None

        for (k, v) in host_tasks_to_run:
            (s, t) = v

            if s.cur_block > lowest_cur_block:
                # Not the current block, ignore it
                continue

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution:\n\tnum_setups: %s\n\tnum_tasks: %s\n\tnum_rescue: %s\n\tnum_always: %s" % (num_setups, num_tasks, num_rescue, num_always))

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks.get(host.name)
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    # advance the host's state past the peeked task (peek=False this time)
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]
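This second variant replaces the manual minimum-tracking loop with min() over a generator: when every remaining host is already ITERATING_COMPLETE the generator is empty, min() raises ValueError, and the except clause turns that into lowest_cur_block = None. A tiny isolated demonstration of that edge case (toy numbers only); min()'s default= keyword, available since Python 3.4, is the shorter alternative:

blocks = []   # e.g. every host was filtered out as ITERATING_COMPLETE
try:
    lowest = min(b for b in blocks)
except ValueError:
    lowest = None
print(lowest)                                   # None

print(min((b for b in blocks), default=None))   # same result without the try/except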
Example #41
    def run(self):
        '''
        The main thread execution, which reads from the results queue
        indefinitely and sends callbacks/etc. when results are received.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            try:
                result = self._read_worker_result()
                if result is None:
                    time.sleep(0.01)
                    continue

                # if this task is registering a result, do it now
                if result._task.register:
                    self._send_result(('register_host_var', result._host,
                                       result._task.register, result._result))

                # send callbacks, execute other options based on the result status
                # TODO: this should all be cleaned up and probably moved to a sub-function.
                #       the fact that this sometimes sends a TaskResult and other times
                #       sends a raw dictionary back may be confusing, but the result vs.
                #       results implementation for tasks with loops should be cleaned up
                #       better than this
                if result.is_unreachable():
                    self._send_result(('host_unreachable', result))
                elif result.is_failed():
                    self._send_result(('host_task_failed', result))
                elif result.is_skipped():
                    self._send_result(('host_task_skipped', result))
                else:
                    if result._task.loop:
                        # this task had a loop, and has more than one result, so
                        # loop over all of them instead of a single result
                        result_items = result._result['results']
                    else:
                        result_items = [result._result]

                    for result_item in result_items:
                        # if this task is notifying a handler, do it now
                        if '_ansible_notify' in result_item:
                            if result.is_changed():
                                # The shared dictionary for notified handlers is a proxy, which
                                # does not detect when sub-objects within the proxy are modified.
                                # So, per the docs, we reassign the list so the proxy picks up and
                                # notifies all other threads
                                for notify in result_item['_ansible_notify']:
                                    self._send_result(
                                        ('notify_handler', result, notify))
                            # now remove the notify field from the results, as it's no longer needed
                            result_item.pop('_ansible_notify')

                        if 'add_host' in result_item:
                            # this task added a new host (add_host module)
                            self._send_result(('add_host', result_item))
                        elif 'add_group' in result_item:
                            # this task added a new group (group_by module)
                            self._send_result(
                                ('add_group', result._host, result_item))
                        elif 'ansible_facts' in result_item:
                            # if this task is registering facts, do that now
                            item = result_item.get('item', None)
                            if result._task.action == 'include_vars':
                                for (key, value) in iteritems(
                                        result_item['ansible_facts']):
                                    self._send_result(
                                        ('set_host_var', result._host,
                                         result._task, item, key, value))
                            else:
                                self._send_result(
                                    ('set_host_facts', result._host,
                                     result._task, item,
                                     result_item['ansible_facts']))

                    # finally, send the ok for this task
                    self._send_result(('host_task_ok', result))

            except queue.Empty:
                pass
            except (KeyboardInterrupt, IOError, EOFError):
                break
            except:
                # TODO: we should probably send a proper callback here instead of
                #       simply dumping a stack trace on the screen
                traceback.print_exc()
                break
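The result-reader thread above is a plain consume-and-dispatch loop: poll a queue, sleep briefly when nothing is there, translate each result into a (message_name, payload, ...) tuple, and bail out on interrupt or EOF. A stripped-down version of that loop shape, using the standard-library queue module instead of Ansible's worker plumbing (the sentinel and the message name are invented for the sketch):

import queue
import time

def consume(result_q, send):
    """Minimal sketch of a result-reader loop; not the Ansible API."""
    while True:
        try:
            result = result_q.get(block=False)
        except queue.Empty:
            time.sleep(0.01)            # nothing yet, avoid a busy spin
            continue
        except (KeyboardInterrupt, EOFError):
            break                       # shut the reader down cleanly
        if result is None:
            break                       # sentinel: the producer is finished
        send(('host_task_ok', result))  # dispatch as a (msg, payload) tuple

q = queue.Queue()
q.put({'host': 'web1', 'changed': True})
q.put(None)
consume(q, print)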
Example #42
    def set_task_and_variable_override(self, task, variables, templar):
        '''
        Sets attributes from the task if they are set, which will override
        those from the play.
        '''

        new_info = self.copy()

        # loop through a subset of attributes on the task object and set
        # connection fields based on their values
        for attr in TASK_ATTRIBUTE_OVERRIDES:
            if hasattr(task, attr):
                attr_val = getattr(task, attr)
                if attr_val is not None:
                    setattr(new_info, attr, attr_val)

        # next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
        # connection info object with 'magic' variables from the variable list.
        # If the value 'ansible_delegated_vars' is in the variables, it means
        # we have a delegated-to host, so we check there first before looking
        # at the variables in general
        if task.delegate_to is not None:
            # In the case of a loop, the delegated_to host may have been
            # templated based on the loop variable, so we try and locate
            # the host name in the delegated variable dictionary here
            delegated_host_name = templar.template(task.delegate_to)
            delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())

            delegated_transport = C.DEFAULT_TRANSPORT
            for transport_var in MAGIC_VARIABLE_MAPPING.get('connection'):
                if transport_var in delegated_vars:
                    delegated_transport = delegated_vars[transport_var]
                    break

            # make sure this delegated_to host has something set for its remote
            # address, otherwise we default to connecting to it by name. This
            # may happen when users put an IP entry into their inventory, or if
            # they rely on DNS for a non-inventory hostname
            for address_var in MAGIC_VARIABLE_MAPPING.get('remote_addr'):
                if address_var in delegated_vars:
                    break
            else:
                display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
                delegated_vars['ansible_host'] = delegated_host_name

            # reset the port back to the default if none was specified, to prevent
            # the delegated host from inheriting the original host's setting
            for port_var in MAGIC_VARIABLE_MAPPING.get('port'):
                if port_var in delegated_vars:
                    break
            else:
                if delegated_transport == 'winrm':
                    delegated_vars['ansible_port'] = 5986
                else:
                    delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT

            # and likewise for the remote user
            for user_var in MAGIC_VARIABLE_MAPPING.get('remote_user'):
                if user_var in delegated_vars and delegated_vars[user_var]:
                    break
            else:
                delegated_vars['ansible_user'] = task.remote_user or self.remote_user
        else:
            delegated_vars = dict()

            # setup shell
            for exe_var in MAGIC_VARIABLE_MAPPING.get('executable'):
                if exe_var in variables:
                    setattr(new_info, 'executable', variables.get(exe_var))

        attrs_considered = []
        for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
            for variable_name in variable_names:
                if attr in attrs_considered:
                    continue
                # for a delegated task, use ONLY the delegated host's vars and
                # never fall back to the vars of the host doing the delegating
                if task.delegate_to is not None:
                    if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
                        setattr(new_info, attr, delegated_vars[variable_name])
                        attrs_considered.append(attr)
                elif variable_name in variables:
                    setattr(new_info, attr, variables[variable_name])
                    attrs_considered.append(attr)
                # no else, as no other vars should be considered

        # become legacy updates -- from commandline
        if not new_info.become_pass:
            if new_info.become_method == 'sudo' and new_info.sudo_pass:
                setattr(new_info, 'become_pass', new_info.sudo_pass)
            elif new_info.become_method == 'su' and new_info.su_pass:
                setattr(new_info, 'become_pass', new_info.su_pass)

        # become legacy updates -- from inventory file (inventory overrides
        # commandline)
        for become_pass_name in MAGIC_VARIABLE_MAPPING.get('become_pass'):
            if become_pass_name in variables:
                break
        else:  # This is a for-else
            if new_info.become_method == 'sudo':
                for sudo_pass_name in MAGIC_VARIABLE_MAPPING.get('sudo_pass'):
                    if sudo_pass_name in variables:
                        setattr(new_info, 'become_pass', variables[sudo_pass_name])
                        break
            elif new_info.become_method == 'su':
                for su_pass_name in MAGIC_VARIABLE_MAPPING.get('su_pass'):
                    if su_pass_name in variables:
                        setattr(new_info, 'become_pass', variables[su_pass_name])
                        break

        # make sure we get port defaults if needed
        if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
            new_info.port = int(C.DEFAULT_REMOTE_PORT)

        # if the final connection type is local, reset the remote_user value
        # to that of the currently logged in user, to ensure any become settings
        # are obeyed correctly
        if new_info.connection == 'local':
            new_info.remote_user = pwd.getpwuid(os.getuid()).pw_name

        # special overrides for the connection setting
        if len(delegated_vars) > 0:
            # in the event that we were using local before make sure to reset the
            # connection type to the default transport for the delegated-to host,
            # if not otherwise specified
            for connection_type in MAGIC_VARIABLE_MAPPING.get('connection'):
                if connection_type in delegated_vars:
                    break
            else:
                remote_addr_local  = new_info.remote_addr in C.LOCALHOST
                inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
                if remote_addr_local and inv_hostname_local:
                    setattr(new_info, 'connection', 'local')
                elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
                    setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)

        # set no_log to default if it was not previously set
        if new_info.no_log is None:
            new_info.no_log = C.DEFAULT_NO_LOG

        # set become defaults if not previously set
        task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user)

        # have always_run override check mode
        if task.always_run:
            new_info.check_mode = False

        return new_info
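The MAGIC_VARIABLE_MAPPING walk in this example gives each connection attribute to the first matching variable name and records it in attrs_considered so later aliases cannot overwrite it; for delegated tasks only the delegated host's vars are consulted. A small sketch of that precedence rule with a made-up two-entry mapping (the real mapping lives in ansible.playbook.play_context):

# Hypothetical subset of a magic-variable mapping: aliases in priority order.
MAPPING = {
    'remote_user': ('ansible_user', 'ansible_ssh_user'),
    'port':        ('ansible_port', 'ansible_ssh_port'),
}

def apply_magic_vars(info, variables):
    considered = set()
    for attr, names in MAPPING.items():
        for name in names:
            if attr in considered:
                break                      # an earlier alias already won
            if name in variables:
                info[attr] = variables[name]
                considered.add(attr)
    return info

print(apply_magic_vars({}, {'ansible_ssh_user': 'deploy', 'ansible_user': 'root'}))
# {'remote_user': 'root'}  -- 'ansible_user' is listed first, so it wins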
Example #43
    def _process_pending_results(self, iterator):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get()
                display.debug("got result from result worker: %s" %
                              ([text_type(x) for x in result], ))

                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed',
                                 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = task_result._host
                    task = task_result._task
                    if result[0] == 'host_task_failed' or task_result.is_failed():
                        if not task.ignore_errors:
                            display.debug("marking %s as failed" % host.name)
                            if task.run_once:
                                # if we're using run_once, we have to fail every host here
                                for h in self._inventory.get_hosts(iterator._play.hosts):
                                    if h.name not in self._tqm._unreachable_hosts:
                                        iterator.mark_host_failed(h)
                            else:
                                iterator.mark_host_failed(host)
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                        else:
                            self._tqm._stats.increment('ok', host.name)
                        self._tqm.send_callback(
                            'v2_runner_on_failed',
                            task_result,
                            ignore_errors=task.ignore_errors)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable',
                                                task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped',
                                                task_result)
                    elif result[0] == 'host_task_ok':
                        if task.action != 'include':
                            self._tqm._stats.increment('ok', host.name)
                            if 'changed' in task_result._result and task_result._result['changed']:
                                self._tqm._stats.increment('changed', host.name)
                            self._tqm.send_callback('v2_runner_on_ok',
                                                    task_result)

                        if self._diff and 'diff' in task_result._result:
                            self._tqm.send_callback('v2_on_file_diff',
                                                    task_result)

                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]

                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in (
                            'host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        for (entry, role_obj) in iteritems(
                                iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                            if role_obj._uuid == task_result._task._role._uuid:
                                role_obj._had_task_run[host.name] = True

                    ret_results.append(task_result)

                elif result[0] == 'add_host':
                    result_item = result[1]
                    new_host_info = result_item.get('add_host', dict())

                    self._add_host(new_host_info, iterator)
                    self._tqm._hostvars_manager.hostvars().set_inventory(
                        self._inventory)

                elif result[0] == 'add_group':
                    host = result[1]
                    result_item = result[2]
                    self._add_group(host, result_item)
                    self._tqm._hostvars_manager.hostvars().set_inventory(
                        self._inventory)

                elif result[0] == 'notify_handler':
                    task_result = result[1]
                    handler_name = result[2]

                    original_task = iterator.get_original_task(
                        task_result._host, task_result._task)
                    if handler_name not in self._notified_handlers:
                        self._notified_handlers[handler_name] = []

                    if task_result._host not in self._notified_handlers[handler_name]:
                        self._notified_handlers[handler_name].append(task_result._host)
                        display.vv("NOTIFIED HANDLER %s" % (handler_name, ))

                elif result[0] == 'register_host_var':
                    # essentially the same as 'set_host_var' below, however we
                    # never follow the delegate_to value for registered vars and
                    # the variable goes in the fact_cache
                    host = result[1]
                    task = result[2]
                    var_value = wrap_var(result[3])
                    var_name = task.register

                    if task.run_once:
                        host_list = [
                            host for host in self._inventory.get_hosts(
                                iterator._play.hosts)
                            if host.name not in self._tqm._unreachable_hosts
                        ]
                    else:
                        host_list = [host]

                    for target_host in host_list:
                        self._variable_manager.set_nonpersistent_facts(
                            target_host, {var_name: var_value})
                        self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(
                            target_host, {var_name: var_value})

                elif result[0] in ('set_host_var', 'set_host_facts'):
                    host = result[1]
                    task = result[2]
                    item = result[3]

                    # find the host we're actually referring to here, which may
                    # be a host that is not really in inventory at all
                    if task.delegate_to is not None and task.delegate_facts:
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        if item is not None:
                            task_vars['item'] = item
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        host_name = templar.template(task.delegate_to)
                        actual_host = self._inventory.get_host(host_name)
                        if actual_host is None:
                            actual_host = Host(name=host_name)
                    else:
                        actual_host = host

                    if result[0] == 'set_host_var':
                        var_name = result[4]
                        var_value = result[5]

                        if task.run_once:
                            host_list = [
                                host for host in self._inventory.get_hosts(
                                    iterator._play.hosts) if host.name not in
                                self._tqm._unreachable_hosts
                            ]
                        else:
                            host_list = [actual_host]

                        for target_host in host_list:
                            self._variable_manager.set_host_variable(
                                target_host, var_name, var_value)
                            self._tqm._hostvars_manager.hostvars().set_host_variable(
                                target_host, var_name, var_value)
                    elif result[0] == 'set_host_facts':
                        facts = result[4]
                        if task.action == 'set_fact':
                            self._variable_manager.set_nonpersistent_facts(actual_host, facts)
                            self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(
                                actual_host, facts)
                        else:
                            self._variable_manager.set_host_facts(actual_host, facts)
                            self._tqm._hostvars_manager.hostvars().set_host_facts(
                                actual_host, facts)

                else:
                    raise AnsibleError("unknown result message received: %s" %
                                       result[0])
            except Queue.Empty:
                time.sleep(0.0001)

        return ret_results
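Every message pulled off the final queue is a plain tuple whose first element names the event and whose remaining elements are positional payload, dispatched through a chain of result[0] comparisons. A compact sketch of the same dispatch shape using a handler table instead of if/elif; the handler names here are invented and do not correspond to strategy methods:

def on_task_ok(task_result):
    print("ok:", task_result)

def on_add_host(host_info):
    print("adding host:", host_info)

HANDLERS = {
    'host_task_ok': on_task_ok,
    'add_host': on_add_host,
}

def dispatch(message):
    msg, payload = message[0], message[1:]
    try:
        handler = HANDLERS[msg]
    except KeyError:
        raise ValueError("unknown result message received: %s" % msg)
    handler(*payload)

dispatch(('host_task_ok', {'changed': False}))
dispatch(('add_host', {'name': 'web3', 'groups': ['web']}))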
Example #44
    def _process_pending_results(self, iterator, one_pass=False):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get()
                display.debug("got result from result worker: %s" %
                              ([text_type(x) for x in result], ))

                # helper method, used to find the original host from the one
                # returned in the result/message, which has been serialized and
                # thus had some information stripped from it to speed up the
                # serialization process
                def get_original_host(host):
                    if host.name in self._inventory._hosts_cache:
                        return self._inventory._hosts_cache[host.name]
                    else:
                        return self._inventory.get_host(host.name)

                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed',
                                 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = get_original_host(task_result._host)
                    task = task_result._task
                    if result[0] == 'host_task_failed' or task_result.is_failed():
                        if not task.ignore_errors:
                            display.debug("marking %s as failed" % host.name)
                            if task.run_once:
                                # if we're using run_once, we have to fail every host here
                                for h in self._inventory.get_hosts(iterator._play.hosts):
                                    if h.name not in self._tqm._unreachable_hosts:
                                        iterator.mark_host_failed(h)
                            else:
                                iterator.mark_host_failed(host)

                            # only add the host to the failed list officially if it has
                            # been failed by the iterator
                            if iterator.is_failed(host):
                                self._tqm._failed_hosts[host.name] = True
                                self._tqm._stats.increment(
                                    'failures', host.name)
                            else:
                                # otherwise, we grab the current state and if we're iterating on
                                # the rescue portion of a block then we save the failed task in a
                                # special var for use within the rescue/always
                                state, _ = iterator.get_next_task_for_host(
                                    host, peek=True)
                                if state.run_state == iterator.ITERATING_RESCUE:
                                    original_task = iterator.get_original_task(
                                        host, task)
                                    self._variable_manager.set_nonpersistent_facts(
                                        host,
                                        dict(
                                            ansible_failed_task=original_task.serialize(),
                                            ansible_failed_result=task_result._result,
                                        ),
                                    )
                        else:
                            self._tqm._stats.increment('ok', host.name)
                        self._tqm.send_callback(
                            'v2_runner_on_failed',
                            task_result,
                            ignore_errors=task.ignore_errors)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable',
                                                task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped',
                                                task_result)
                    elif result[0] == 'host_task_ok':
                        if task.action != 'include':
                            self._tqm._stats.increment('ok', host.name)
                            if 'changed' in task_result._result and task_result._result['changed']:
                                self._tqm._stats.increment('changed', host.name)
                            self._tqm.send_callback('v2_runner_on_ok',
                                                    task_result)

                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff',
                                                    task_result)

                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]

                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in (
                            'host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        for (entry, role_obj) in iteritems(
                                iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                            if role_obj._uuid == task_result._task._role._uuid:
                                role_obj._had_task_run[host.name] = True

                    ret_results.append(task_result)

                elif result[0] == 'add_host':
                    result_item = result[1]
                    new_host_info = result_item.get('add_host', dict())

                    self._add_host(new_host_info, iterator)

                elif result[0] == 'add_group':
                    host = get_original_host(result[1])
                    result_item = result[2]
                    self._add_group(host, result_item)

                elif result[0] == 'notify_handler':
                    task_result = result[1]
                    handler_name = result[2]

                    original_host = get_original_host(task_result._host)
                    original_task = iterator.get_original_task(
                        original_host, task_result._task)

                    def search_handler_blocks(handler_name, handler_blocks):
                        for handler_block in handler_blocks:
                            for handler_task in handler_block.block:
                                handler_vars = self._variable_manager.get_vars(
                                    loader=self._loader,
                                    play=iterator._play,
                                    task=handler_task)
                                templar = Templar(loader=self._loader,
                                                  variables=handler_vars)
                                try:
                                    # first we check with the full result of get_name(), which may
                                    # include the role name (if the handler is from a role). If that
                                    # is not found, we resort to the simple name field, which doesn't
                                    # have anything extra added to it.
                                    target_handler_name = templar.template(
                                        handler_task.name)
                                    if target_handler_name == handler_name:
                                        return handler_task
                                    else:
                                        target_handler_name = templar.template(
                                            handler_task.get_name())
                                        if target_handler_name == handler_name:
                                            return handler_task
                                except (UndefinedError,
                                        AnsibleUndefinedVariable):
                                    # We skip this handler due to the fact that it may be using
                                    # a variable in the name that was conditionally included via
                                    # set_fact or some other method, and we don't want to error
                                    # out unnecessarily
                                    continue
                        return None

                    # Find the handler using the above helper.  First we look up the
                    # dependency chain of the current task (if it's from a role), otherwise
                    # we just look through the list of handlers in the current play/all
                    # roles and use the first one that matches the notify name
                    target_handler = None
                    if original_task._role:
                        target_handler = search_handler_blocks(
                            handler_name,
                            original_task._role.get_handler_blocks())
                    if target_handler is None:
                        target_handler = search_handler_blocks(
                            handler_name, iterator._play.handlers)
                    if target_handler is None:
                        if handler_name in self._listening_handlers:
                            for listening_handler_name in self._listening_handlers[handler_name]:
                                listening_handler = None
                                if original_task._role:
                                    listening_handler = search_handler_blocks(
                                        listening_handler_name,
                                        original_task._role.get_handler_blocks())
                                if listening_handler is None:
                                    listening_handler = search_handler_blocks(
                                        listening_handler_name,
                                        iterator._play.handlers)
                                if listening_handler is None:
                                    raise AnsibleError(
                                        "The requested handler listener '%s' was not found in any of the known handlers"
                                        % listening_handler_name)

                                if original_host not in self._notified_handlers[listening_handler]:
                                    self._notified_handlers[listening_handler].append(original_host)
                                    display.vv("NOTIFIED HANDLER %s" %
                                               (listening_handler_name, ))
                        else:
                            raise AnsibleError(
                                "The requested handler '%s' was found in neither the main handlers list nor the listening handlers list"
                                % handler_name)
                    else:
                        if target_handler in self._notified_handlers:
                            if original_host not in self._notified_handlers[target_handler]:
                                self._notified_handlers[target_handler].append(original_host)
                                # FIXME: should this be a callback?
                                display.vv("NOTIFIED HANDLER %s" %
                                           (handler_name, ))

                elif result[0] == 'register_host_var':
                    # essentially the same as 'set_host_var' below, however we
                    # never follow the delegate_to value for registered vars and
                    # the variable goes in the fact_cache
                    host = get_original_host(result[1])
                    task = result[2]
                    var_value = wrap_var(result[3])
                    var_name = task.register

                    if task.run_once:
                        host_list = [
                            host for host in self._inventory.get_hosts(
                                iterator._play.hosts)
                            if host.name not in self._tqm._unreachable_hosts
                        ]
                    else:
                        host_list = [host]

                    for target_host in host_list:
                        self._variable_manager.set_nonpersistent_facts(
                            target_host, {var_name: var_value})

                elif result[0] in ('set_host_var', 'set_host_facts'):
                    host = get_original_host(result[1])
                    task = result[2]
                    item = result[3]

                    # find the host we're actually referring to here, which may
                    # be a host that is not really in inventory at all
                    if task.delegate_to is not None and task.delegate_facts:
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        loop_var = 'item'
                        if task.loop_control:
                            loop_var = task.loop_control.loop_var or 'item'
                        if item is not None:
                            task_vars[loop_var] = item
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        host_name = templar.template(task.delegate_to)
                        actual_host = self._inventory.get_host(host_name)
                        if actual_host is None:
                            actual_host = Host(name=host_name)
                    else:
                        actual_host = host

                    if task.run_once:
                        host_list = [
                            host for host in self._inventory.get_hosts(
                                iterator._play.hosts)
                            if host.name not in self._tqm._unreachable_hosts
                        ]
                    else:
                        host_list = [actual_host]

                    if result[0] == 'set_host_var':
                        var_name = result[4]
                        var_value = result[5]
                        for target_host in host_list:
                            self._variable_manager.set_host_variable(
                                target_host, var_name, var_value)
                    elif result[0] == 'set_host_facts':
                        facts = result[4]
                        for target_host in host_list:
                            if task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(
                                    target_host, facts.copy())
                            else:
                                self._variable_manager.set_host_facts(
                                    target_host, facts.copy())
                elif result[0].startswith(
                        'v2_runner_item') or result[0] == 'v2_runner_retry':
                    self._tqm.send_callback(result[0], result[1])
                elif result[0] == 'v2_on_file_diff':
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', result[1])
                else:
                    raise AnsibleError("unknown result message received: %s" %
                                       result[0])

            except Queue.Empty:
                time.sleep(0.005)

            if one_pass:
                break

        return ret_results
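
# --- Illustrative sketch (not part of the original source): a minimal model of the
# handler-notification bookkeeping above. The _Handler class, its 'listen' list and
# the notified mapping are simplified stand-ins for the real Ansible objects.
class _Handler(object):
    def __init__(self, name, listen=None):
        self.name = name
        self.listen = listen or []

def _notify(notified_handlers, handlers, handler_name, host):
    # match handlers by name or by a 'listen' topic, then record the host exactly
    # once so each handler runs a single time per notified host
    matches = [h for h in handlers
               if h.name == handler_name or handler_name in h.listen]
    if not matches:
        raise KeyError("handler '%s' was not found" % handler_name)
    for handler in matches:
        hosts = notified_handlers.setdefault(handler, [])
        if host not in hosts:
            hosts.append(host)

# usage: notifying the same host twice only records it once
#   notified = {}
#   restart = _Handler('restart nginx', listen=['web handlers'])
#   _notify(notified, [restart], 'web handlers', 'host1')
#   _notify(notified, [restart], 'web handlers', 'host1')
#   notified[restart] == ['host1']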
Example #45
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = self._display.verbosity

        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        if not shebang:
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path()

        if tmp:
            remote_module_filename = self._connection._shell.get_remote_filename(module_name)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
            if module_style in ['old', 'non_native_want_json']:
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote")
            self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k,v in iteritems(module_args):
                    args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style == 'non_native_want_json':
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root':
            # deal with possible umask issues once sudo'ed to other user
            self._remote_chmod('a+r', remote_module_path)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so we may not be able to delete the files as that other user;
                # clean up the temp files as the original user in a second step
                cmd2 = self._connection._shell.remove(tmp, recurse=True)
                self._low_level_execute_command(cmd2, sudoable=False)

        try:
            data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', u'')))
        except ValueError:
            # not valid json, let's try to capture the error
            data = dict(failed=True, parsed=False)
            if 'stderr' in res and res['stderr'].startswith(u'Traceback'):
                data['exception'] = res['stderr']
            else:
                data['msg'] = "MODULE FAILURE"
                data['module_stdout'] = res.get('stdout', u'')
                if 'stderr' in res:
                    data['module_stderr'] = res['stderr']

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
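
# --- Illustrative sketch (not part of the original source): how 'old' style module
# arguments get flattened into a key=value string before being written to the remote
# args file. quote() is pipes.quote / shlex.quote, the same kind of shell quoting
# used above; the exact quoting format here is a simplification.
try:
    from pipes import quote        # Python 2
except ImportError:
    from shlex import quote        # Python 3

def build_old_style_args(module_args):
    # quote each value so spaces and shell metacharacters survive the round trip
    return " ".join('%s=%s' % (k, quote(str(v))) for k, v in module_args.items())

# build_old_style_args({'path': '/tmp/x y', 'state': 'present'})
# -> "path='/tmp/x y' state=present"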
Example #46
    def _parse(self, err):

        all_hosts = {}

        # not passing from_remote because data from CMDB is trusted
        try:
            self.raw = self._loader.load(self.data)
        except Exception as e:
            sys.stderr.write(err + "\n")
            raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_str(self.filename), to_str(e)))

        if not isinstance(self.raw, Mapping):
            sys.stderr.write(err + "\n")
            raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_str(self.filename)))

        self.raw  = json_dict_bytes_to_unicode(self.raw)

        group = None
        for (group_name, data) in self.raw.items():

            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host.  This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.

            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue

            if group_name not in self.groups:
                self.groups[group_name] = Group(group_name)

            group = self.groups[group_name]
            host = None

            if not isinstance(data, dict):
                data = {'hosts': data}
            # if the dict has none of those subkeys, this is the simplified syntax: treat the name as a host with vars
            elif not any(k in data for k in ('hosts','vars','children')):
                data = {'hosts': [group_name], 'vars': data}

            if 'hosts' in data:
                if not isinstance(data['hosts'], list):
                    raise AnsibleError("You defined a group \"%s\" with bad "
                        "data for the host list:\n %s" % (group_name, data))

                for hostname in data['hosts']:
                    if hostname not in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)

            if 'vars' in data:
                if not isinstance(data['vars'], dict):
                    raise AnsibleError("You defined a group \"%s\" with bad "
                        "data for variables:\n %s" % (group_name, data))

                for k, v in iteritems(data['vars']):
                    group.set_variable(k, v)

        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in self.groups:
                        self.groups[group_name].add_child_group(self.groups[child_name])

        # Finally, add all top-level groups as children of 'all'.
        # We exclude ungrouped here because it was already added as a child of
        # 'all' at the time it was created.

        for group in self.groups.values():
            if group.depth == 0 and group.name not in ('all', 'ungrouped'):
                self.groups['all'].add_child_group(group)
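
# --- Illustrative sketch (not part of the original source): the JSON shape the
# parser above expects from a dynamic inventory script. A group value may be a plain
# host list, a dict with 'hosts'/'vars'/'children', or (lacking those keys) a vars
# dict; '_meta.hostvars' lets the caller skip invoking the script with --host per
# host. All names, addresses and ports below are made up.
import json

EXAMPLE_INVENTORY = json.loads("""
{
    "_meta": {"hostvars": {"web1": {"ansible_host": "10.0.0.5"}}},
    "webservers": {"hosts": ["web1", "web2"], "vars": {"http_port": 80}},
    "eastcoast": {"children": ["webservers"]},
    "dbservers": ["db1"]
}
""")

for group_name, data in EXAMPLE_INVENTORY.items():
    if group_name == '_meta':
        continue
    if not isinstance(data, dict):
        data = {'hosts': data}
    print(group_name, data.get('hosts', []), data.get('children', []))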
Example #47
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader,
                          shared_loader_obj=self._shared_loader_obj,
                          variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(
                task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation failed, skipping this task")
                return dict(changed=False,
                            skipped=True,
                            skip_reason='Conditional check failed',
                            _ansible_no_log=self._play_context.no_log)
            # since we're not skipping, if there was a loop evaluation error
            # raised earlier we need to raise it now to halt the execution of
            # this task
            if self._loop_eval_error is not None:
                raise self._loop_eval_error
        except AnsibleError:
            # skip conditional exception in the case of includes, as the vars needed might not be available except in the included tasks or due to tags
            if self._task.action not in ['include', 'include_role']:
                raise

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True,
                            msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file,
                        include_variables=include_variables)

        # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
        elif self._task.action == 'include_role':
            include_variables = self._task.args.copy()
            return dict(include_role=self._task,
                        include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                display.deprecated(
                    "Using variables for task params is unsafe, especially if the variables come from an external source like facts"
                )
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        if not self._connection or not getattr(
                self._connection, 'connected', False
        ) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
            self._connection = self._get_connection(variables=variables,
                                                    templar=templar)
            hostvars = variables.get('hostvars', None)
            if hostvars:
                try:
                    target_hostvars = hostvars.raw_get(self._host.name)
                except:
                    # FIXME: this should catch the j2undefined error here
                    #        specifically instead of all exceptions
                    target_hostvars = dict()
            else:
                target_hostvars = dict()
            self._connection.set_host_overrides(host=self._host,
                                                hostvars=target_hostvars)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        self._handler = self._get_action_handler(connection=self._connection,
                                                 templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict((i[0], i[1])
                                   for i in iteritems(self._task.args)
                                   if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in range(1, retries + 1):
            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_text(e))
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result.copy())

            if self._task.async > 0:
                if self._task.poll > 0 and not result.get('skipped'):
                    result = self._poll_async_result(result=result,
                                                     templar=templar,
                                                     task_vars=vars_copy)

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(
                        templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(
                        templar, vars_copy)
                    result['failed_when_result'] = result[
                        'failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # set the failed property if the result has a non-zero rc. This will be
            # overridden below if the failed_when property is set
            if result.get('rc', 0) != 0:
                result['failed'] = True

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                result['attempts'] = attempt
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # the until condition was not met, so sleep before the next attempt
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' %
                                      (attempt, retries))
                        self._rslt_q.put(TaskResult(self._host.name,
                                                    self._task._uuid, result),
                                         block=False)
                        time.sleep(delay)
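
# --- Illustrative sketch (not part of the original source): the retry/until pattern
# above reduced to plain Python. run_task and condition_met stand in for the action
# handler run and the Conditional evaluation of the task's 'until' expression.
import time

def run_with_retries(run_task, condition_met, retries=3, delay=1):
    result = None
    for attempt in range(1, retries + 1):
        result = run_task()
        if condition_met(result):
            break
        if attempt < retries:
            # condition not met yet, so wait before the next attempt
            time.sleep(delay)
    result['attempts'] = attempt
    return result

# run_with_retries(lambda: {'rc': 0}, lambda r: r['rc'] == 0)
# -> {'rc': 0, 'attempts': 1}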
Example #48
    def _load_role_data(self, role_include, parent_role=None):
        self._role_name        = role_include.role
        self._role_path        = role_include.get_role_path()
        self._role_params      = role_include.get_role_params()
        self._variable_manager = role_include.get_variable_manager()
        self._loader           = role_include.get_loader()

        if parent_role:
            self.add_parent(parent_role)

        # copy over all field attributes, except for when and tags, which
        # are special cases and need to preserve pre-existing values
        for (attr_name, _) in iteritems(self._valid_attrs):
            if attr_name not in ('when', 'tags'):
                setattr(self, attr_name, getattr(role_include, attr_name))

        current_when = getattr(self, 'when')[:]
        current_when.extend(role_include.when)
        setattr(self, 'when', current_when)

        current_tags = getattr(self, 'tags')[:]
        current_tags.extend(role_include.tags)
        setattr(self, 'tags', current_tags)

        # dynamically load any plugins from the role directory
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join(self._role_path, obj.subdir)
                if os.path.isdir(plugin_path):
                    obj.add_directory(plugin_path)

        # load the role's other files, if they exist
        metadata = self._load_role_yaml('meta')
        if metadata:
            self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader)
            self._dependencies = self._load_dependencies()
        else:
            self._metadata = RoleMetadata()

        task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))
        if task_data:
            try:
                self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
            except AssertionError:
                raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data)

        handler_data = self._load_role_yaml('handlers')
        if handler_data:
            try:
                self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader, variable_manager=self._variable_manager)
            except AssertionError:
                raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=handler_data)

        # vars and default vars are regular dictionaries
        self._role_vars  = self._load_role_yaml('vars', main=self._from_files.get('vars'))
        if self._role_vars is None:
            self._role_vars = dict()
        elif not isinstance(self._role_vars, dict):
            raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)

        self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults'))
        if self._default_vars is None:
            self._default_vars = dict()
        elif not isinstance(self._default_vars, dict):
            raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
Example #49
    def set_task_and_variable_override(self, task, variables, templar):
        '''
        Sets attributes from the task if they are set, which will override
        those from the play.
        '''

        new_info = self.copy()

        # loop through a subset of attributes on the task object and set
        # connection fields based on their values
        for attr in TASK_ATTRIBUTE_OVERRIDES:
            if hasattr(task, attr):
                attr_val = getattr(task, attr)
                if attr_val is not None:
                    setattr(new_info, attr, attr_val)

        # next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
        # connection info object with 'magic' variables from the variable list.
        # If the value 'ansible_delegated_vars' is in the variables, it means
        # we have a delegated-to host, so we check there first before looking
        # at the variables in general
        if task.delegate_to is not None:
            # In the case of a loop, the delegated_to host may have been
            # templated based on the loop variable, so we try and locate
            # the host name in the delegated variable dictionary here
            delegated_host_name = templar.template(task.delegate_to)
            delegated_vars = variables.get('ansible_delegated_vars',
                                           dict()).get(delegated_host_name,
                                                       dict())

            delegated_transport = C.DEFAULT_TRANSPORT
            for transport_var in MAGIC_VARIABLE_MAPPING.get('connection'):
                if transport_var in delegated_vars:
                    delegated_transport = delegated_vars[transport_var]
                    break

            # make sure this delegated_to host has something set for its remote
            # address, otherwise we default to connecting to it by name. This
            # may happen when users put an IP entry into their inventory, or if
            # they rely on DNS for a non-inventory hostname
            for address_var in MAGIC_VARIABLE_MAPPING.get('remote_addr'):
                if address_var in delegated_vars:
                    break
            else:
                display.debug(
                    "no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution"
                    % delegated_host_name)
                delegated_vars['ansible_host'] = delegated_host_name

            # reset the port back to the default if none was specified, to prevent
            # the delegated host from inheriting the original host's setting
            for port_var in MAGIC_VARIABLE_MAPPING.get('port'):
                if port_var in delegated_vars:
                    break
            else:
                if delegated_transport == 'winrm':
                    delegated_vars['ansible_port'] = 5986
                else:
                    delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT

            # and likewise for the remote user
            for user_var in MAGIC_VARIABLE_MAPPING.get('remote_user'):
                if user_var in delegated_vars:
                    break
            else:
                delegated_vars[
                    'ansible_user'] = task.remote_user or self.remote_user
        else:
            delegated_vars = dict()

        for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
            for variable_name in variable_names:
                if isinstance(delegated_vars,
                              dict) and variable_name in delegated_vars:
                    setattr(new_info, attr, delegated_vars[variable_name])
                elif variable_name in variables:
                    setattr(new_info, attr, variables[variable_name])

        # make sure we get port defaults if needed
        if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
            new_info.port = int(C.DEFAULT_REMOTE_PORT)

        # become legacy updates
        if not new_info.become_pass:
            if new_info.become_method == 'sudo' and new_info.sudo_pass:
                setattr(new_info, 'become_pass', new_info.sudo_pass)
            elif new_info.become_method == 'su' and new_info.su_pass:
                setattr(new_info, 'become_pass', new_info.su_pass)

        # special overrides for the connection setting
        if len(delegated_vars) > 0:
            # in the event that we were using local before make sure to reset the
            # connection type to the default transport for the delegated-to host,
            # if not otherwise specified
            for connection_type in MAGIC_VARIABLE_MAPPING.get('connection'):
                if connection_type in delegated_vars:
                    break
            else:
                remote_addr_local = new_info.remote_addr in C.LOCALHOST
                inv_hostname_local = delegated_vars.get(
                    'inventory_hostname') in C.LOCALHOST
                if remote_addr_local and inv_hostname_local:
                    setattr(new_info, 'connection', 'local')
                elif getattr(new_info, 'connection',
                             None) == 'local' and (not remote_addr_local
                                                   or not inv_hostname_local):
                    setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)

        # set no_log to default if it was not previously set
        if new_info.no_log is None:
            new_info.no_log = C.DEFAULT_NO_LOG

        # set become defaults if not previously set
        task.set_become_defaults(new_info.become, new_info.become_method,
                                 new_info.become_user)

        # have always_run override check mode
        if task.always_run:
            new_info.check_mode = False

        return new_info
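
# --- Illustrative sketch (not part of the original source): the for/else defaulting
# pattern used above for delegated hosts, with a toy variable mapping. The real
# MAGIC_VARIABLE_MAPPING covers many more attributes and aliases.
TOY_MAPPING = {
    'port': ('ansible_port', 'ansible_ssh_port'),
    'remote_user': ('ansible_user', 'ansible_ssh_user'),
}

def apply_delegated_defaults(delegated_vars, default_port=22, default_user='root'):
    for port_var in TOY_MAPPING['port']:
        if port_var in delegated_vars:
            break
    else:
        # no port alias was provided, so fall back to the default
        delegated_vars['ansible_port'] = default_port

    for user_var in TOY_MAPPING['remote_user']:
        if user_var in delegated_vars:
            break
    else:
        delegated_vars['ansible_user'] = default_user
    return delegated_vars

# apply_delegated_defaults({'ansible_ssh_port': 2222})
# -> {'ansible_ssh_port': 2222, 'ansible_user': 'root'}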
Example #50
    def _load_role_data(self, role_include, parent_role=None):
        self._role_name = role_include.role
        self._role_path = role_include.get_role_path()
        self._role_params = role_include.get_role_params()
        self._variable_manager = role_include.get_variable_manager()
        self._loader = role_include.get_loader()

        if parent_role:
            self.add_parent(parent_role)

        # copy over all field attributes, except for when and tags, which
        # are special cases and need to preserve pre-existing values
        for (attr_name, _) in iteritems(self._get_base_attributes()):
            if attr_name not in ('when', 'tags'):
                setattr(self, attr_name, getattr(role_include, attr_name))

        current_when = getattr(self, 'when')[:]
        current_when.extend(role_include.when)
        setattr(self, 'when', current_when)

        current_tags = getattr(self, 'tags')[:]
        current_tags.extend(role_include.tags)
        setattr(self, 'tags', current_tags)

        # dynamically load any plugins from the role directory
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join(self._role_path, obj.subdir)
                if os.path.isdir(plugin_path):
                    obj.add_directory(plugin_path)

        # load the role's other files, if they exist
        metadata = self._load_role_yaml('meta')
        if metadata:
            self._metadata = RoleMetadata.load(
                metadata,
                owner=self,
                variable_manager=self._variable_manager,
                loader=self._loader)
            self._dependencies = self._load_dependencies()
        else:
            self._metadata = RoleMetadata()

        task_data = self._load_role_yaml('tasks')
        if task_data:
            try:
                self._task_blocks = load_list_of_blocks(
                    task_data,
                    play=self._play,
                    role=self,
                    loader=self._loader,
                    variable_manager=self._variable_manager)
            except AssertionError:
                raise AnsibleParserError(
                    "The tasks/main.yml file for role '%s' must contain a list of tasks"
                    % self._role_name,
                    obj=task_data)

        handler_data = self._load_role_yaml('handlers')
        if handler_data:
            try:
                self._handler_blocks = load_list_of_blocks(
                    handler_data,
                    play=self._play,
                    role=self,
                    use_handlers=True,
                    loader=self._loader,
                    variable_manager=self._variable_manager)
            except AssertionError:
                raise AnsibleParserError(
                    "The handlers/main.yml file for role '%s' must contain a list of tasks"
                    % self._role_name,
                    obj=handler_data)

        # vars and default vars are regular dictionaries
        self._role_vars = self._load_role_yaml('vars')
        if self._role_vars is None:
            self._role_vars = dict()
        elif not isinstance(self._role_vars, dict):
            raise AnsibleParserError(
                "The vars/main.yml file for role '%s' must contain a dictionary of variables"
                % self._role_name)

        self._default_vars = self._load_role_yaml('defaults')
        if self._default_vars is None:
            self._default_vars = dict()
        elif not isinstance(self._default_vars, dict):
            raise AnsibleParserError(
                "The defaults/main.yml file for role '%s' must contain a dictionary of variables"
                % self._role_name)
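
# --- Illustrative sketch (not part of the original source): why 'when' and 'tags'
# are handled separately above -- they are extended rather than overwritten, so the
# conditions and tags already attached to the role survive whatever the include adds.
def merge_special_attrs(current_when, include_when, current_tags, include_tags):
    merged_when = current_when[:]          # copy, then extend
    merged_when.extend(include_when)
    merged_tags = current_tags[:]
    merged_tags.extend(include_tags)
    return merged_when, merged_tags

# merge_special_attrs(['role_flag'], ['play_flag'], ['setup'], ['web'])
# -> (['role_flag', 'play_flag'], ['setup', 'web'])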
Example #51
    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables.  Run basic type conversions (from isa) as well as
        any _post_validate_<foo> functions.
        '''

        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')

        for (name, attribute) in iteritems(self._valid_attrs):

            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError(
                        "the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in (
                    'Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue

            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                elif attribute.isa == 'class':
                    value = getattr(self, name)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))

                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    setattr(self, name, attribute.default)
                    continue

                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = to_text(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa in ('list', 'barelist'):
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            if isinstance(value, string_types
                                          ) and attribute.isa == 'barelist':
                                display.deprecated(
                                    "Using comma separated values for a list has been deprecated. "
                                    "You should instead use the correct YAML syntax for lists. "
                                )
                                value = value.split(',')
                            else:
                                value = [value]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError(
                                        "the field '%s' should be a list of %s,"
                                        " but the item '%s' is a %s" %
                                        (name, attribute.listof, item,
                                         type(item)),
                                        obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError(
                                            "the field '%s' is required, and cannot have empty values"
                                            % (name, ),
                                            obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        elif not isinstance(value, (list, set)):
                            if isinstance(value, string_types):
                                value = value.split(',')
                            else:
                                # Making a list like this handles strings of
                                # text and bytes properly
                                value = [value]
                        if not isinstance(value, set):
                            value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)
                    elif attribute.isa == 'class':
                        if not isinstance(value, attribute.class_type):
                            raise TypeError(
                                "%s is not a valid %s (got a %s instead)" %
                                (name, attribute.class_type, type(value)))
                        value.post_validate(templar=templar)

                # and assign the massaged value back to the attribute field
                setattr(self, name, value)

            except (TypeError, ValueError) as e:
                raise AnsibleParserError(
                    "the field '%s' has an invalid value (%s), and could not be converted to an %s."
                    " Error was: %s" % (name, value, attribute.isa, e),
                    obj=self.get_ds())
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    raise AnsibleParserError(
                        "the field '%s' has an invalid value, which appears to include a variable that is undefined."
                        " The error was: %s" % (name, e),
                        obj=self.get_ds())

        self._finalized = True
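
# --- Illustrative sketch (not part of the original source): a reduced version of
# the per-'isa' coercion performed above, covering a handful of types only and
# leaving out the omit/percent/class handling and the real boolean() helper.
def coerce(value, isa):
    if value is None:
        return None
    if isa == 'string':
        return str(value)
    if isa == 'int':
        return int(value)
    if isa == 'bool':
        # simplified stand-in for Ansible's boolean() conversion
        return str(value).lower() in ('yes', 'true', '1')
    if isa == 'list':
        return value if isinstance(value, list) else [value]
    if isa == 'dict':
        if not isinstance(value, dict):
            raise TypeError("%s is not a dictionary" % value)
        return value
    return value

# coerce('yes', 'bool') -> True ; coerce('x', 'list') -> ['x']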
Example #52
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # Get the connection user for permission checks
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity

        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__

        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)

        # let module know about filesystems that selinux treats specially
        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS

        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        if not shebang:
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp:
            remote_module_filename = self._connection._shell.get_remote_filename(module_name)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
            if module_style in ['old', 'non_native_want_json']:
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote")
            self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k,v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style == 'non_native_want_json':
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        remote_files = None

        if args_file_path:
            remote_files = tmp, remote_module_path, args_file_path
        elif remote_module_path:
            remote_files = tmp, remote_module_path

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        if remote_files:
            self._fixup_perms2(remote_files, remote_user)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so we may not be able to delete the files as that other user;
                # clean up the temp files as the original user in a second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd, sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning('Error deleting remote temporary files (rc: {0}, stderr: {1})'.format(tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
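
# --- Illustrative sketch (not part of the original source): the general idea behind
# parsing module output -- stdout may be preceded by noise (motd banners, sudo
# warnings), so leading non-JSON lines are skipped before json.loads. The real
# _parse_returned_data / _filter_leading_non_json_lines helpers do more than this.
import json

def parse_module_stdout(stdout):
    lines = stdout.splitlines()
    for idx, line in enumerate(lines):
        if line.startswith('{'):
            try:
                return json.loads('\n'.join(lines[idx:]))
            except ValueError:
                continue
    return {'failed': True, 'parsed': False, 'msg': 'MODULE FAILURE',
            'module_stdout': stdout}

# parse_module_stdout('spurious banner\n{"changed": false, "rc": 0}')
# -> {'changed': False, 'rc': 0}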
Example #53
    def parse(self):
        '''
        Given a task in one of the supported forms, parses and returns
        the action, arguments, and delegate_to values for the
        task, dealing with all sorts of levels of fuzziness.
        '''

        thing      = None

        action      = None
        delegate_to = self._task_ds.get('delegate_to', None)
        args        = dict()


        # this is the 'extra gross' scenario detailed above, so we grab
        # the args and pass them in as additional arguments, which can/will
        # be overwritten via dict updates from the other arg sources below
        additional_args = self._task_ds.get('args', dict())

        # We can have one of action, local_action, or module specified
        # action
        if 'action' in self._task_ds:
            # an old school 'action' statement
            thing = self._task_ds['action']
            action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)


        # local_action
        if 'local_action' in self._task_ds:
            # local_action is similar but also implies a delegate_to
            if action is not None:
                raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
            thing = self._task_ds.get('local_action', '')
            delegate_to = 'localhost'
            action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)

        # module: <stuff> is the newer-style invocation

        # walk the input dictionary to see if we recognize a module name
        for (item, value) in iteritems(self._task_ds):
            if item in module_loader or item in ['meta', 'include', 'include_role']:
                # finding more than one module name is a problem
                if action is not None:
                    raise AnsibleParserError("conflicting action statements", obj=self._task_ds)
                action = item
                thing = value
                action, args = self._normalize_parameters(thing, action=action, additional_args=additional_args)


        # if we didn't see any module in the task at all, it's not a task really
        if action is None:
            if 'ping' not in module_loader:
                raise AnsibleParserError("The requested action was not found in configured module paths. "
                        "Additionally, core modules are missing. If this is a checkout, "
                        "run 'git pull --rebase' to correct this problem.",
                        obj=self._task_ds)

            else:
                raise AnsibleParserError("no action detected in task. This often indicates a misspelled module name, or incorrect module path.", obj=self._task_ds)
        elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
            templar = Templar(loader=None)
            raw_params = args.pop('_raw_params')
            if templar._contains_vars(raw_params):
                args['_variable_params'] = raw_params
            else:
                raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES)), obj=self._task_ds)

        # shell modules require special handling
        (action, args) = self._handle_shell_weirdness(action, args)

        return (action, args, delegate_to)
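
# --- Illustrative sketch (not part of the original source): the three task
# spellings the parser above accepts, shown as the raw task dicts it receives.
# The module name and arguments are made up; only the key shapes matter.
OLD_STYLE    = {'action': 'command echo hi'}                        # 'action:' statement
LOCAL_ACTION = {'local_action': 'command echo hi'}                  # implies delegate_to: localhost
NEW_STYLE    = {'command': 'echo hi', 'args': {'chdir': '/tmp'}}    # module name as the key

def action_keys(task_ds):
    # a task may use exactly one of these forms; mixing them is a parser error
    reserved = ('args', 'delegate_to')
    return [k for k in task_ds if k not in reserved]

# action_keys(NEW_STYLE) -> ['command']
# action_keys(OLD_STYLE) -> ['action']
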
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation failed, skipping this task")
                return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
        except AnsibleError:
            # skip conditional exceptions in the case of includes, as the vars needed might not be available except in the included tasks or due to tags
            if self._task.action != 'include':
                raise

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True, msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file, include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
                variable_params.update(self._task.args)
                self._task.args = variable_params
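                # (this path covers tasks whose whole argument set comes from a variable,
                #  e.g. a hypothetical `mymodule: "{{ my_module_args }}"` resolving to a dict)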

        # get the connection and the handler for this execution
        if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
            self._connection = self._get_connection(variables=variables, templar=templar)
            self._connection.set_host_overrides(host=self._host)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
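        # (an argument written as `param: "{{ foo | default(omit) }}"` renders to the
        #  one-off omit token when foo is undefined, and is dropped here so the module
        #  never receives that parameter)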
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in range(1, retries + 1):
            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_unicode(e))
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result.copy())

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
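                # (the parsed payload typically carries keys such as 'ansible_job_id' and
                #  'results_file', which the async polling below relies on)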
                try:
                    if ('skipped' in result and result['skipped']) or ('failed' in result and result['failed']):
                        return result
                    result = json.loads(result.get('stdout'))
                except (TypeError, ValueError) as e:
                    return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result, templar=templar)

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # set the failed property if the result has a non-zero rc. This will be
            # overridden below if the failed_when property is set
            if result.get('rc', 0) != 0:
                result['failed'] = True

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # the until condition was not met, so sleep for the specified delay and retry
                    if attempt < retries:
                        result['attempts'] = attempt
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                        self._rslt_q.put(TaskResult(self._host, self._task, result), block=False)
                        time.sleep(delay)
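# A compact, self-contained sketch of the retry/until contract implemented by the loop
# above (assumed helper callables, not Ansible's API): run up to `retries` attempts, stop
# as soon as the `until` predicate accepts the result, and sleep `delay` seconds between
# failed attempts.
import time

def run_with_retries(run_once, until, retries=3, delay=1):
    result = None
    for attempt in range(1, retries + 1):
        result = run_once()
        if until(result):
            break
        if attempt < retries:
            result['attempts'] = attempt
            time.sleep(delay)
    return result

# usage sketch: retry a flaky callable until it reports success
# run_with_retries(lambda: {'rc': 0}, until=lambda r: r.get('rc') == 0, retries=3, delay=1)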
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader,
                          shared_loader_obj=self._shared_loader_obj,
                          variables=variables)

        # apply the given task's information to the connection info,
        # which may override some fields already set by the play or
        # the options specified on the command line
        self._play_context = self._play_context.set_task_and_variable_override(
            task=self._task, variables=variables, templar=templar)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        # We also add "magic" variables back into the variables dict to make sure
        # a certain subset of variables exist.
        self._play_context.update_vars(variables)
        self._play_context.post_validate(templar=templar)

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        if not self._task.evaluate_conditional(templar, variables):
            self._display.debug("when evaulation failed, skipping this task")
            return dict(changed=False,
                        skipped=True,
                        skip_reason='Conditional check failed',
                        _ansible_no_log=self._play_context.no_log)

        # Now we do final validation on the task, which sets all fields to their final values.
        # In the case of debug tasks, we save any 'var' params and restore them after validating
        # so that variables are not replaced too early.
        prev_var = None
        if self._task.action == 'debug' and 'var' in self._task.args:
            prev_var = self._task.args.pop('var')

        original_args = self._task.args.copy()
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                self._display.deprecated(
                    "Using variables for task params is unsafe, especially if the variables come from an external source like facts"
                )
                variable_params.update(self._task.args)
                self._task.args = variable_params

        if prev_var is not None:
            self._task.args['var'] = prev_var

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = original_args
            include_file = include_variables.get('_raw_params')
            del include_variables['_raw_params']
            return dict(include=include_file,
                        include_variables=include_variables)

        # get the connection and the handler for this execution
        self._connection = self._get_connection(variables=variables,
                                                templar=templar)
        self._connection.set_host_overrides(host=self._host)

        self._handler = self._get_action_handler(connection=self._connection,
                                                 templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict((i[0], i[1])
                                   for i in iteritems(self._task.args)
                                   if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        retries = self._task.retries
        if retries <= 0:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        self._display.debug("starting attempt loop")
        result = None
        for attempt in range(retries):
            if attempt > 0:
                # FIXME: this should use the self._display.callback/message passing mechanism
                self._display.display(
                    "FAILED - RETRYING: %s (%d retries left). Result was: %s" %
                    (self._task, retries - attempt, result),
                    color="red")
                result['attempts'] = attempt + 1

            self._display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=str(e))
            self._display.debug("handler run complete")

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
                try:
                    result = json.loads(result.get('stdout'))
                except (TypeError, ValueError) as e:
                    return dict(
                        failed=True,
                        msg="The async task did not return valid JSON: %s" %
                        str(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result,
                                                     templar=templar)

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # create a conditional object to evaluate task conditions
            cond = Conditional(loader=self._loader)

            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None:
                    cond.when = [self._task.changed_when]
                    result['changed'] = cond.evaluate_conditional(
                        templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when is not None:
                    cond.when = [self._task.failed_when]
                    failed_when_result = cond.evaluate_conditional(
                        templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                    return failed_when_result
                return False

            if self._task.until:
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    _evaluate_changed_when_result(result)
                    _evaluate_failed_when_result(result)
                    break
            elif (self._task.changed_when is not None or
                  self._task.failed_when is not None) and 'skipped' not in result:
                _evaluate_changed_when_result(result)
                if _evaluate_failed_when_result(result):
                    break
            elif 'failed' not in result:
                if result.get('rc', 0) != 0:
                    result['failed'] = True
                else:
                    # if the result is not failed, stop trying
                    break

            if attempt < retries - 1:
                time.sleep(delay)
            else:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)
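# A minimal sketch (assumed callables, not Ansible's API) of the result post-processing
# performed in both loops above: a non-zero rc marks the task failed, while changed_when
# and failed_when expressions, evaluated against the current variables, may override the
# 'changed' and 'failed' flags on non-skipped results.
def postprocess_result(result, evaluate, changed_when=None, failed_when=None):
    # `evaluate` stands in for Conditional.evaluate_conditional(templar, vars_copy): it
    # takes one conditional expression and returns a boolean
    if result.get('rc', 0) != 0:
        result['failed'] = True
    if 'skipped' not in result:
        if changed_when is not None:
            result['changed'] = evaluate(changed_when)
        if failed_when is not None:
            result['failed_when_result'] = result['failed'] = evaluate(failed_when)
    return result

# usage sketch: a non-zero rc that a failed_when expression deliberately forgives
# postprocess_result({'rc': 2}, evaluate=lambda expr: False, failed_when="result.rc > 10")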