Example #1
    def run(self):
        """Run the ansible command

        Subclasses must implement this method.  It does the actual work of
        running an Ansible command.
        """

        display.vv(to_text(self.parser.get_version()))

        if C.CONFIG_FILE:
            display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
        else:
            display.v(u"No config file found; using defaults")

        # warn about deprecated config options
        for deprecated in C.config.DEPRECATED:
            name = deprecated[0]
            why = deprecated[1]['why']
            if 'alternatives' in deprecated[1]:
                alt = ', use %s instead' % deprecated[1]['alternatives']
            else:
                alt = ''
            ver = deprecated[1]['version']
            display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)

        # warn about typing issues with configuration entries
        for unable in C.config.UNABLE:
            display.warning("Unable to set correct type for configuration entry: %s" % unable)
Example #2
 def send_callback(self, method_name, *args, **kwargs):
     for callback_plugin in self._callback_plugins:
         # a plugin that set self.disabled to True will not be called
         # see osx_say.py example for such a plugin
         if getattr(callback_plugin, 'disabled', False):
             continue
         methods = [
             getattr(callback_plugin, method_name, None),
             getattr(callback_plugin, 'v2_on_any', None)
         ]
         for method in methods:
             if method is not None:
                 try:
                     # temporary hack, required due to a change in the callback API, so
                     # we don't break backwards compatibility with callbacks which were
                     # designed to use the original API
                     # FIXME: target for removal and revert to the original code here
                     #        after a year (2017-01-14)
                     if method_name == 'v2_playbook_on_start':
                         import inspect
                         (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
                         if 'playbook' in f_args:
                             method(*args, **kwargs)
                         else:
                             method()
                     else:
                         method(*args, **kwargs)
                 except Exception as e:
                     try:
                         # fall back to the v1-style method name (strip the 'v2_'
                         # prefix); 'method' is a bound method object, so the name
                         # must come from method_name, not from the method itself
                         v1_method = getattr(callback_plugin, method_name.replace('v2_', ''))
                         v1_method(*args, **kwargs)
                     except Exception:
                         display.warning('Error when using %s: %s' % (method, str(e)))
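
The 'disabled' check at the top of the dispatch loop lets a plugin opt out of all callbacks at runtime, as osx_say.py does when its dependencies are missing. A minimal sketch of such a plugin; the class layout follows the usual Ansible callback convention, and the failing import is a hypothetical stand-in:

class CallbackModule(object):
    def __init__(self):
        try:
            import some_optional_dependency  # hypothetical requirement
            self.disabled = False
        except ImportError:
            self.disabled = True  # send_callback() will now skip this plugin

    def v2_runner_on_ok(self, result):
        pass  # never invoked while disabled is True

plugin = CallbackModule()
print(plugin.disabled)  # True when the dependency is missing
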
Example #3
    def run(self):

        super(ConfigCLI, self).run()

        if self.options.config_file:
            self.config_file = unfrackpath(self.options.config_file, follow=False)
            self.config = ConfigManager(self.config_file)
        else:
            self.config = ConfigManager()
            self.config_file = find_ini_config_file()

        if self.config_file:
            try:
                if not os.path.exists(self.config_file):
                    raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file))
                elif not os.path.isfile(self.config_file):
                    raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))

                os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
            except Exception:
                if self.action in ['view']:
                    raise
                elif self.action in ['edit', 'update']:
                    display.warning("File does not exist, used empty file: %s" % self.config_file)

        elif self.action == 'view':
            raise AnsibleError('Invalid or no config file was supplied')

        self.execute()
Example #4
 def select_playbook(self, path):
     playbook = None
     if len(self.args) > 0 and self.args[0] is not None:
         playbook = os.path.join(path, self.args[0])
         rc = self.try_playbook(playbook)
         if rc != 0:
             display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc]))
             return None
         return playbook
     else:
         fqdn = socket.getfqdn()
         hostpb = os.path.join(path, fqdn + '.yml')
         shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
         localpb = os.path.join(path, self.DEFAULT_PLAYBOOK)
         errors = []
         for pb in [hostpb, shorthostpb, localpb]:
             rc = self.try_playbook(pb)
             if rc == 0:
                 playbook = pb
                 break
             else:
                 errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc]))
         if playbook is None:
             display.warning("\n".join(errors))
         return playbook
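
The fallback order above tries the host's FQDN, then its short hostname, then a default playbook. A quick illustration of the candidate list; the directory is a placeholder, and 'local.yml' stands in for DEFAULT_PLAYBOOK (typically that value for ansible-pull):

import os
import socket

path = '/var/lib/ansible/pull'  # placeholder checkout directory
fqdn = socket.getfqdn()
candidates = [
    os.path.join(path, fqdn + '.yml'),                # e.g. web01.example.com.yml
    os.path.join(path, fqdn.split('.')[0] + '.yml'),  # e.g. web01.yml
    os.path.join(path, 'local.yml'),                  # the default playbook
]
print(candidates)
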
Example #5
    def path_dwim_relative_stack(self, paths, dirname, source):
        '''
        find one file in first path in stack taking roles into account and adding play basedir as fallback

        :arg paths: A list of text strings which are the paths to look for the filename in.
        :arg dirname: A text string representing a directory.  The directory
            is prepended to the source to form the path to search for.
        :arg source: A text string which is the filename to search for
        :rtype: A text string
        :returns: An absolute path to the filename ``source``
        '''
        b_dirname = to_bytes(dirname)
        b_source = to_bytes(source)

        result = None
        if source is None:
            display.warning('Invalid request to find a file that matches a "null" value')
        elif source and (source.startswith('~') or source.startswith(os.path.sep)):
            # path is absolute, no relative needed, check existence and return source
            test_path = unfrackpath(b_source)
            if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')):
                result = test_path
        else:
            search = []
            for path in paths:
                upath = unfrackpath(path)
                b_upath = to_bytes(upath, errors='surrogate_or_strict')
                b_mydir = os.path.dirname(b_upath)

                # if path is in role and 'tasks' not there already, add it into the search
                if b_upath.endswith(b'tasks') and os.path.exists(os.path.join(b_upath, b'main.yml')) \
                        or os.path.exists(os.path.join(b_upath, b'tasks/main.yml')) \
                        or os.path.exists(os.path.join(b_mydir, b'tasks/main.yml')):
                    if b_mydir.endswith(b'tasks'):
                        search.append(os.path.join(os.path.dirname(b_mydir), b_dirname, b_source))
                        search.append(os.path.join(b_mydir, b_source))
                    else:
                        # don't add dirname if user already is using it in source
                        if b_source.split(b'/')[0] == b_dirname:
                            search.append(os.path.join(b_upath, b_source))
                        else:
                            search.append(os.path.join(b_upath, b_dirname, b_source))
                        search.append(os.path.join(b_upath, b'tasks', b_source))
                elif b_dirname not in b_source.split(b'/'):
                    # don't add dirname if user already is using it in source
                    search.append(os.path.join(b_upath, b_dirname, b_source))
                    search.append(os.path.join(b_upath, b_source))

            # always append basedir as last resort
            search.append(os.path.join(to_bytes(self.get_basedir()), b_dirname, b_source))
            search.append(os.path.join(to_bytes(self.get_basedir()), b_source))

            display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
            for b_candidate in search:
                display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate)))
                if os.path.exists(b_candidate):
                    result = to_text(b_candidate)
                    break

        return result
Example #6
    def construct_mapping(self, node, deep=False):
        # Most of this is from yaml.constructor.SafeConstructor.  We replicate
        # it here so that we can warn users when they have duplicate dict keys
        # (pyyaml silently allows overwriting keys)
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        self.flatten_mapping(node)
        mapping = AnsibleMapping()

        # Add our extra information to the returned value
        mapping.ansible_pos = self._node_position_info(node)

        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError as exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unacceptable key (%s)" % exc, key_node.start_mark)

            if key in mapping:
                display.warning('While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}).  Using last defined value only.'.format(key, *mapping.ansible_pos))

            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value

        return mapping
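
The duplicate-key warning exists because plain PyYAML accepts mappings with repeated keys and silently keeps the last value. A small self-contained demonstration of that default behavior:

import yaml

doc = """
name: first
name: second
"""
# No error is raised; the later key silently wins, which is exactly what
# the construct_mapping() override above surfaces as a warning instead.
print(yaml.safe_load(doc))  # {'name': 'second'}
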
Example #7
    def get(self, key):
        """ This checks the in memory cache first as the fact was not expired at 'gather time'
        and it would be problematic if the key did expire after some long running tasks and
        user gets 'undefined' error in the same play """

        if key in self._cache:
            return self._cache.get(key)

        if self.has_expired(key) or key == "":
            raise KeyError

        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            with codecs.open(cachefile, 'r', encoding='utf-8') as f:
                try:
                    value = json.load(f)
                    self._cache[key] = value
                    return value
                except ValueError as e:
                    display.warning("error while trying to read %s : %s. Most likely a corrupt file, so erasing and failing." % (cachefile, to_bytes(e)))
                    self.delete(key)
                    raise AnsibleError("The JSON cache file %s was corrupt, or did not otherwise contain valid JSON data."
                            " It has been removed, so you can re-run your command now." % cachefile)
        except (OSError,IOError) as e:
            display.warning("error while trying to read %s : %s" % (cachefile, to_bytes(e)))
            raise KeyError
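
The method above is a read-through cache: serve from memory when possible, otherwise load the JSON file from disk and remember it. A stripped-down sketch of the same pattern, with illustrative names and no expiry handling:

import json
import os

class TinyJsonFileCache(object):
    def __init__(self, cache_dir):
        self._cache = {}
        self._cache_dir = cache_dir

    def get(self, key):
        if key in self._cache:            # 1. in-memory hit
            return self._cache[key]
        cachefile = os.path.join(self._cache_dir, key)
        with open(cachefile) as f:        # 2. fall back to the on-disk copy
            value = json.load(f)
        self._cache[key] = value          # 3. remember it for next time
        return value
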
Example #8
    def add_host(self, host, group=None, port=None):
        ''' adds a host to inventory and possibly a group if not there already '''

        g = None
        if group:
            if group in self.groups:
                g = self.groups[group]
            else:
                raise AnsibleError("Could not find group %s in inventory" % group)

        if host not in self.hosts:
            h = Host(host, port)
            self.hosts[host] = h
            if self.current_source:  # set to 'first source' in which host was encountered
                self.set_variable(host, 'inventory_file', os.path.basename(self.current_source))
                self.set_variable(host, 'inventory_dir', basedir(self.current_source))
            else:
                self.set_variable(host, 'inventory_file', None)
                self.set_variable(host, 'inventory_dir', None)
            display.debug("Added host %s to inventory" % (host))

            # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
            if host in C.LOCALHOST:
                if self.localhost is None:
                    self.localhost = self.hosts[host]
                    display.vvvv("Set default localhost to %s" % h)
                else:
                    display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
        else:
            h = self.hosts[host]

        if g:
            g.add_host(h)
            self._groups_dict_cache = {}
            display.debug("Added host %s to group %s" % (host, group))
Example #9
    def _create_implicit_localhost(self, pattern):

        if self.localhost:
            new_host = self.localhost
        else:
            new_host = Host(pattern)

            # use 'all' vars but not part of all group
            new_host.vars = self.groups['all'].get_vars()

            new_host.address = "127.0.0.1"
            new_host.implicit = True

            if "ansible_python_interpreter" not in new_host.vars:
                py_interp = sys.executable
                if not py_interp:
                    # sys.executable is not set in some cornercases.  #13585
                    py_interp = '/usr/bin/python'
                    display.warning('Unable to determine python interpreter from sys.executable. Using /usr/bin/python default. '
                                    'You can correct this by setting ansible_python_interpreter for localhost')
                new_host.set_variable("ansible_python_interpreter", py_interp)

            if "ansible_connection" not in new_host.vars:
                new_host.set_variable("ansible_connection", 'local')

            self.localhost = new_host

        return new_host
Example #10
    def _enumerate_matches(self, pattern):
        """
        Returns a list of host names matching the given pattern according to the
        rules explained above in _match_one_pattern.
        """

        results = []
        # check if pattern matches group
        matching_groups = self._match_list(self._inventory.groups, pattern)
        if matching_groups:
            for groupname in matching_groups:
                results.extend(self._inventory.groups[groupname].get_hosts())
        else:
            # pattern might match host
            matching_hosts = self._match_list(self._inventory.hosts, pattern)
            if matching_hosts:
                for hostname in matching_hosts:
                    results.append(self._inventory.hosts[hostname])

        if not results and pattern in C.LOCALHOST:
            # get_host autocreates implicit when needed
            implicit = self._inventory.get_host(pattern)
            if implicit:
                results.append(implicit)

        if not results:
            display.warning("Could not match supplied host pattern, ignoring: %s" % pattern)
        return results
Example #11
    def send_callback(self, method_name, *args, **kwargs):
        for callback_plugin in [self._stdout_callback] + self._callback_plugins:
            # a plugin that set self.disabled to True will not be called
            # see osx_say.py example for such a plugin
            if getattr(callback_plugin, 'disabled', False):
                continue

            # try to find v2 method, fallback to v1 method, ignore callback if no method found
            methods = []
            for possible in [method_name, 'v2_on_any']:
                gotit = getattr(callback_plugin, possible, None)
                if gotit is None:
                    gotit = getattr(callback_plugin, possible.replace('v2_',''), None)
                if gotit is not None:
                    methods.append(gotit)

            for method in methods:
                try:
                    # temporary hack, required due to a change in the callback API, so
                    # we don't break backwards compatibility with callbacks which were
                    # designed to use the original API
                    # FIXME: target for removal and revert to the original code here after a year (2017-01-14)
                    if method_name == 'v2_playbook_on_start':
                        import inspect
                        (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
                        if 'playbook' in f_args:
                            method(*args, **kwargs)
                        else:
                            method()
                    else:
                        method(*args, **kwargs)
                except Exception as e:
                    #TODO: add config toggle to make this fatal or not?
                    display.warning(u"Failure when attempting to use callback plugin (%s): %s" % (to_unicode(callback_plugin), to_unicode(e)))
Example #12
def warn_if_reserved(myvars):
    ''' this function warns if any variable passed conflicts with internally reserved names '''

    varnames = set(myvars)
    varnames.discard('vars')  # we add this one internally, so safe to ignore
    for varname in varnames.intersection(_RESERVED_NAMES):
        display.warning('Found variable using reserved name: %s' % varname)
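
The check is a plain set intersection against the reserved names. A runnable sketch with an illustrative reserved-name set standing in for Ansible's real _RESERVED_NAMES:

_RESERVED = frozenset(['hostvars', 'groups', 'environment', 'vars'])  # illustrative

myvars = {'environment': 'prod', 'app_port': 8080, 'vars': {}}
varnames = set(myvars)
varnames.discard('vars')  # added internally, so safe to ignore
for varname in varnames.intersection(_RESERVED):
    print('Found variable using reserved name: %s' % varname)
# prints: Found variable using reserved name: environment
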
Example #13
    def find_plugin(self, name, suffixes=None):
        ''' Find a plugin named name '''

        if not suffixes:
            if self.class_name:
                suffixes = ['.py']
            else:
                suffixes = ['.py', '']

        try:
            return self._plugin_path_cache[name]
        except KeyError:
            # Cache miss.  Now let's find the plugin
            pass

        for path in [p for p in self._get_paths() if p not in self._searched_paths]:
            if os.path.isdir(path):
                try:
                    full_paths = (os.path.join(path, f) for f in os.listdir(path))
                except OSError as e:
                    display.warning("Error accessing plugin paths: %s" % str(e))
                    # move on to the next path; full_paths is unbound when listdir fails
                    continue
                for full_path in (f for f in full_paths if os.path.isfile(f)):
                    for suffix in suffixes:
                        if full_path.endswith(suffix):
                            full_name = os.path.basename(full_path)
                            if suffix:
                                base_name = full_name[:-len(suffix)]
                            else:
                                base_name = full_name
                            break
                    else: # Yes, this is a for-else: http://bit.ly/1ElPkyg
                        continue

                    # Module found, now see if it's already in the cache
                    if base_name not in self._plugin_path_cache:
                        self._plugin_path_cache[base_name] = full_path

            self._searched_paths.add(path)
            try:
                return self._plugin_path_cache[name]
            except KeyError:
                # Didn't find the plugin in this directory.  Load modules from
                # the next one
                pass

        # if nothing is found, try finding alias/deprecated
        if not name.startswith('_'):
            alias_name = '_' + name
            # We've already cached all the paths at this point
            if alias_name in self._plugin_path_cache:
                if not os.path.islink(self._plugin_path_cache[alias_name]):
                    display.deprecated('%s is kept for backwards compatibility '
                              'but usage is discouraged. The module '
                              'documentation details page may explain '
                              'more about this rationale.' %
                              name.lstrip('_'))
                return self._plugin_path_cache[alias_name]

        return None
Example #14
    def __init__(self, *args, **kwargs):
        self._plugin = cache_loader.get(C.CACHE_PLUGIN)
        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        self._display = display

        if self._plugin is None:
            display.warning("Failed to load fact cache plugins")
            return
Example #15
    def set(self, key, value):

        self._cache[key] = value

        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            self._dump(value, cachefile)
        except (OSError, IOError) as e:
            display.warning("error in '%s' cache plugin while trying to write to %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
Example #16
    def parse_inventory(self, host_list):

        if isinstance(host_list, string_types):
            if "," in host_list:
                host_list = host_list.split(",")
                host_list = [ h for h in host_list if h and h.strip() ]

        self.parser = None

        # Always create the 'all' and 'ungrouped' groups, even if host_list is
        # empty: in this case we will subsequently add the implicit 'localhost' to it.

        ungrouped = Group('ungrouped')
        all = Group('all')
        all.add_child_group(ungrouped)

        self.groups = dict(all=all, ungrouped=ungrouped)

        if host_list is None:
            pass
        elif isinstance(host_list, list):
            for h in host_list:
                try:
                    (host, port) = parse_address(h, allow_ranges=False)
                except AnsibleError as e:
                    display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e))
                    host = h
                    port = None
                all.add_host(Host(host, port))
        elif self._loader.path_exists(host_list):
            #TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
            if self.is_directory(host_list):
                # Ensure basedir is inside the directory
                host_list = os.path.join(self.host_list, "")
                self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
            else:
                self.parser = get_file_parser(host_list, self.groups, self._loader)
                vars_loader.add_directory(self.basedir(), with_subdir=True)

            if not self.parser:
                # should never happen, but JIC
                raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
        else:
            display.warning("Host file not found: %s" % to_unicode(host_list))

        self._vars_plugins = [ x for x in vars_loader.all(self) ]

        # set group vars from group_vars/ files and vars plugins
        for g in self.groups:
            group = self.groups[g]
            group.vars = combine_vars(group.vars, self.get_group_variables(group.name))

        # set host vars from host_vars/ files and vars plugins
        for host in self.get_hosts():
            host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
Example #17
    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        socket_path = None

        if self._play_context.connection == 'network_cli':
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using network_cli and will be ignored')
                del self._task.args['provider']
        elif self._play_context.connection == 'local':
            provider = load_provider(ios_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'ios'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
            pc.become = provider['authorize'] or False
            if pc.become:
                pc.become_method = 'enable'
            pc.become_pass = provider['auth_pass']

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            task_vars['ansible_socket'] = socket_path
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}

        # make sure we are in the right cli context which should be
        # enable mode and not config module
        if socket_path is None:
            socket_path = self._connection.socket_path

        conn = Connection(socket_path)
        out = conn.get_prompt()
        while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
            display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
            conn.send_command('exit')
            out = conn.get_prompt()

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
Example #18
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        #task_vars = self._job_vars.copy()
        task_vars = self._job_vars

        loop_var = 'item'
        if self._task.loop_control:
            # the value may be 'None', so we still need to default it back to 'item' 
            loop_var = self._task.loop_control.loop_var or 'item'

        if loop_var in task_vars:
            display.warning("The loop variable '%s' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions and unexpected behavior." % loop_var)

        items = self._squash_items(items, loop_var, task_vars)
        for item in items:
            task_vars[loop_var] = item

            try:
                tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
                tmp_task._parent = self._task._parent
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=to_unicode(e)))
                continue

            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
            res = self._execute(variables=task_vars)
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

            # now update the result with the item info, and append the result
            # to the list of results
            res[loop_var] = item
            res['_ansible_item_result'] = True

            self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, res), block=False)
            results.append(res)
            del task_vars[loop_var]

        return results
Example #19
    def _lookup(self, name, *args, **kwargs):
        instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)

        if instance is not None:
            wantlist = kwargs.pop('wantlist', False)
            allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
            errors = kwargs.pop('errors', 'strict')

            from ansible.utils.listify import listify_lookup_plugin_terms
            loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
            # safely catch run failures per #5059
            try:
                ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                raise AnsibleUndefinedVariable(e)
            except Exception as e:
                if self._fail_on_lookup_errors:
                    msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
                          (name, type(e), to_text(e))
                    if errors == 'warn':
                        display.warning(msg)
                    elif errors == 'ignore':
                        display.display(msg, log_only=True)
                    else:
                        raise AnsibleError(to_native(msg))
                ran = None

            if ran and not allow_unsafe:
                if wantlist:
                    ran = wrap_var(ran)
                else:
                    try:
                        ran = UnsafeProxy(",".join(ran))
                    except TypeError:
                        # Lookup Plugins should always return lists.  Throw an error if that's not
                        # the case:
                        if not isinstance(ran, Sequence):
                            raise AnsibleError("The lookup plugin '%s' did not return a list."
                                               % name)

                        # The TypeError we can recover from is when the value *inside* of the list
                        # is not a string
                        if len(ran) == 1:
                            ran = wrap_var(ran[0])
                        else:
                            ran = wrap_var(ran)

                if self.cur_context:
                    self.cur_context.unsafe = True
            return ran
        else:
            raise AnsibleError("lookup plugin (%s) not found" % name)
Example #20
    def all(self, *args, **kwargs):
        ''' instantiates all plugins with the same arguments '''

        path_only = kwargs.pop('path_only', False)
        class_only = kwargs.pop('class_only', False)
        all_matches = []
        found_in_cache = True

        for i in self._get_paths():
            all_matches.extend(glob.glob(os.path.join(i, "*.py")))

        for path in sorted(all_matches, key=os.path.basename):
            name = os.path.basename(os.path.splitext(path)[0])

            if '__init__' in name:
                continue

            if path_only:
                yield path
                continue

            if path not in self._module_cache:
                self._module_cache[path] = self._load_module_source(name, path)
                found_in_cache = False

            try:
                obj = getattr(self._module_cache[path], self.class_name)
            except AttributeError as e:
                display.warning("Skipping plugin (%s) as it seems to be invalid: %s" % (path, to_text(e)))
                continue

            if self.base_class:
                # The import path is hardcoded and should be the right place,
                # so we are not expecting an ImportError.
                module = __import__(self.package, fromlist=[self.base_class])
                # Check whether this obj has the required base class.
                try:
                    plugin_class = getattr(module, self.base_class)
                except AttributeError:
                    continue
                if not issubclass(obj, plugin_class):
                    continue

            self._display_plugin_load(self.class_name, name, self._searched_paths, path, found_in_cache=found_in_cache, class_only=class_only)
            if not class_only:
                try:
                    obj = obj(*args, **kwargs)
                except TypeError as e:
                    display.warning("Skipping plugin (%s) as it seems to be incomplete: %s" % (path, to_text(e)))
                    # actually skip it, as the warning says; otherwise the
                    # uninstantiated class would be yielded below
                    continue

            self._update_object(obj, name, path)
            yield obj
Example #21
    def run(self, tmp=None, task_vars=None):
        socket_path = None
        transport = 'rest'

        if self._play_context.connection == 'network_cli':
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning("'provider' is unnecessary when using 'network_cli' and will be ignored")
        elif self._play_context.connection == 'local':
            provider = load_provider(f5_provider_spec, self._task.args)
            transport = provider['transport'] or transport

            display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)

            if transport == 'cli':
                pc = copy.deepcopy(self._play_context)
                pc.connection = 'network_cli'
                pc.network_os = 'bigip'
                pc.remote_addr = provider.get('server', self._play_context.remote_addr)
                pc.port = int(provider['server_port'] or self._play_context.port or 22)
                pc.remote_user = provider.get('user', self._play_context.connection_user)
                pc.password = provider.get('password', self._play_context.password)
                pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
                pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

                display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
                connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
                socket_path = connection.run()
                display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
                if not socket_path:
                    return {'failed': True,
                            'msg': 'Unable to open shell. Please see: ' +
                                   'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

                task_vars['ansible_socket'] = socket_path
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}

        if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
            # make sure we are in the right cli context which should be
            # enable mode and not config module
            if socket_path is None:
                socket_path = self._connection.socket_path
            conn = Connection(socket_path)
            out = conn.get_prompt()
            while '(config' in to_text(out, errors='surrogate_then_replace').strip():
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                conn.send_command('exit')
                out = conn.get_prompt()

        result = super(ActionModule, self).run(tmp, task_vars)
        return result
Example #22
def remove_internal_keys(data):
    '''
    More nuanced version of strip_internal_keys
    '''
    for key in list(data.keys()):
        if (key.startswith('_ansible_') and key != '_ansible_parsed') or key in C.INTERNAL_RESULT_KEYS:
            display.warning("Removed unexpected internal key in module return: %s = %s" % (key, data[key]))
            del data[key]

    # remove bad/empty internal keys
    for key in ['warnings', 'deprecations']:
        if key in data and not data[key]:
            del data[key]
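
A before/after illustration of the filtering above, as a self-contained sketch; the internal-keys tuple is an assumed stand-in for C.INTERNAL_RESULT_KEYS:

INTERNAL_RESULT_KEYS = ('add_host', 'add_group')  # assumed stand-in

def remove_internal_keys_demo(data):
    for key in list(data.keys()):
        if (key.startswith('_ansible_') and key != '_ansible_parsed') or key in INTERNAL_RESULT_KEYS:
            print('Removed unexpected internal key in module return: %s = %s' % (key, data[key]))
            del data[key]
    for key in ('warnings', 'deprecations'):
        if key in data and not data[key]:
            del data[key]

result = {'changed': False, '_ansible_parsed': True, '_ansible_no_log': False, 'warnings': []}
remove_internal_keys_demo(result)
print(result)  # {'changed': False, '_ansible_parsed': True}
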
Example #23
    def _remove_tmp_path(self, tmp_path):
        '''Remove a temporary path we created. '''

        if self._should_remove_tmp_path(tmp_path):
            cmd = self._connection._shell.remove(tmp_path, recurse=True)
            # If we have gotten here we have a working ssh configuration.
            # If ssh breaks we could leave tmp directories out on the remote system.
            tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False)

            tmp_rm_data = self._parse_returned_data(tmp_rm_res)
            if tmp_rm_data.get('rc', 0) != 0:
                display.warning('Error deleting remote temporary files (rc: %s, stderr: %s)'
                                % (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
Example #24
    def _get_docker_remote_user(self):
        """ Get the default user configured in the docker container """
        p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        out, err = p.communicate()

        if p.returncode != 0:
            display.warning('unable to retrieve default user from docker container: %s' % out + err)
            return None

        # The default exec user is root, unless it was changed in the Dockerfile with USER
        return out.strip() or 'root'
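
The subprocess call above is the programmatic form of: docker inspect --format '{{.Config.User}}' <container>. A direct sketch, with a placeholder container name:

import subprocess

out = subprocess.check_output(
    ['docker', 'inspect', '--format', '{{.Config.User}}', 'my_container'])  # placeholder name
# An empty result means the image has no USER directive, i.e. the default, root.
print(out.strip() or b'root')
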
Example #25
    def set_host_overrides(self, host):
        '''
        Override WinRM-specific options from host variables.
        '''
        host_vars = combine_vars(host.get_group_vars(), host.get_vars())

        self._winrm_host = self._play_context.remote_addr
        self._winrm_port = int(self._play_context.port or 5986)
        self._winrm_scheme = host_vars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
        self._winrm_path = host_vars.get('ansible_winrm_path', '/wsman')
        self._winrm_user = self._play_context.remote_user
        self._winrm_pass = self._play_context.password

        if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
            self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
        else:
            # for legacy versions of pywinrm, use the values we know are supported
            self._winrm_supported_authtypes = set(['plaintext','ssl','kerberos'])

        # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
        transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'

        if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
            self._winrm_transport = 'kerberos,%s' % transport_selector
        else:
            self._winrm_transport = transport_selector
        self._winrm_transport = host_vars.get('ansible_winrm_transport', self._winrm_transport)
        if isinstance(self._winrm_transport, basestring):
            self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]

        unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)

        if unsupported_transports:
            raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % list(unsupported_transports))

        self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
        argspec = inspect.getargspec(Protocol.__init__)
        supported_winrm_args = set(argspec.args)
        passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in host_vars if v.startswith('ansible_winrm_')])
        unsupported_args = passed_winrm_args.difference(supported_winrm_args)

        # warn for kwargs unsupported by the installed version of pywinrm
        for arg in unsupported_args:
            display.warning("ansible_winrm_{0} unsupported by pywinrm (are you running the right pywinrm version?)".format(arg))

        # arg names we're going passing directly
        internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password'])

        # pass through matching kwargs, excluding the list we want to treat specially
        for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
            self._winrm_kwargs[arg] = host_vars['ansible_winrm_%s' % arg]
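
The getargspec-based filtering above is a general pattern: introspect the installed library's signature and only pass through options it actually accepts, warning about the rest. A condensed sketch with a hypothetical function (getfullargspec is the modern spelling of the deprecated getargspec):

import inspect

def connect(endpoint, username=None, password=None, read_timeout_sec=None):
    return locals()  # hypothetical stand-in for Protocol.__init__

supported = set(inspect.getfullargspec(connect).args)
candidate_kwargs = {'username': 'admin', 'password': 's3cret', 'bogus_option': 1}
for k in set(candidate_kwargs) - supported:
    print('%s unsupported (are you running the right library version?)' % k)
filtered = dict((k, v) for k, v in candidate_kwargs.items() if k in supported)
print(filtered)  # {'username': 'admin', 'password': 's3cret'}
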
Example #26
    def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
        if not self.protocol:
            self.protocol = self._winrm_connect()
            self._connected = True
        if from_exec:
            display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
        else:
            display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
        command_id = None
        try:
            stdin_push_failed = False
            command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))

            # TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that
            # comes from this)
            try:
                if stdin_iterator:
                    for (data, is_last) in stdin_iterator:
                        self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)

            except Exception:
                from traceback import format_exc
                # format_exc() takes no exception argument; it formats the active one
                display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % format_exc())
                stdin_push_failed = True

            if stdin_push_failed:
                raise AnsibleError('winrm send_input failed')

            # NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
            # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
            resptuple = self.protocol.get_command_output(self.shell_id, command_id)
            # ensure stdout/stderr are text for py3
            # FUTURE: this should probably be done internally by pywinrm
            response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))

            # TODO: check result from response and set stdin_push_failed if we have nonzero
            if from_exec:
                display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
            else:
                display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)

            display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
            display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)

            if stdin_push_failed:
                raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err))

            return response
        finally:
            if command_id:
                self.protocol.cleanup_command(self.shell_id, command_id)
Example #27
    def _file_transport_command(self, in_path, out_path, sftp_action):
        # scp and sftp require square brackets for IPv6 addresses, but
        # accept them for hostnames and IPv4 addresses too.
        host = '[%s]' % self.host

        # since this can be a non-bool now, we need to handle it correctly
        scp_if_ssh = C.DEFAULT_SCP_IF_SSH
        if not isinstance(scp_if_ssh, bool):
            scp_if_ssh = scp_if_ssh.lower()
            if scp_if_ssh in BOOLEANS:
                scp_if_ssh = boolean(scp_if_ssh)
            elif scp_if_ssh != 'smart':
                raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')

        # create a list of commands to use based on config options
        methods = ['sftp']
        if scp_if_ssh == 'smart':
            methods.append('scp')
        elif scp_if_ssh:
            methods = ['scp']

        success = False
        res = None
        for method in methods:
            if method == 'sftp':
                cmd = self._build_command('sftp', to_bytes(host))
                in_data = u"{0} {1} {2}\n".format(sftp_action, shlex_quote(in_path), shlex_quote(out_path))
            elif method == 'scp':
                if sftp_action == 'get':
                    cmd = self._build_command('scp', u'{0}:{1}'.format(host, shlex_quote(in_path)), out_path)
                else:
                    cmd = self._build_command('scp', in_path, u'{0}:{1}'.format(host, shlex_quote(out_path)))
                in_data = None

            in_data = to_bytes(in_data, nonstring='passthru')
            (returncode, stdout, stderr) = self._run(cmd, in_data, checkrc=False)
            # Check the return code and rollover to next method if failed
            if returncode == 0:
                success = True
                break
            else:
                # If not in smart mode, the data will be printed by the raise below
                if scp_if_ssh == 'smart':
                    display.warning(msg='%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
                    display.debug(msg='%s' % to_native(stdout))
                    display.debug(msg='%s' % to_native(stderr))
                res = (returncode, stdout, stderr)

        if not success:
            raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}"\
                    .format(to_native(out_path), to_native(res[1]), to_native(res[2])))
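
The sftp branch drives a non-interactive sftp process by writing one command per line on its stdin. An illustrative shape of that input, with placeholder paths (shlex.quote is the Python 3 spelling of the shlex_quote used above):

from shlex import quote as shlex_quote

sftp_action, in_path, out_path = 'put', '/tmp/src file', '/tmp/dest file'
in_data = u"{0} {1} {2}\n".format(sftp_action, shlex_quote(in_path), shlex_quote(out_path))
print(in_data)  # put '/tmp/src file' '/tmp/dest file'
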
Example #28
    def set(self, key, value):

        self._cache[key] = value

        cachefile = "%s/%s" % (self._cache_dir, key)
        try:
            f = codecs.open(cachefile, 'w', encoding='utf-8')
        except (OSError, IOError) as e:
            display.warning("error while trying to write to %s : %s" % (cachefile, to_bytes(e)))
        else:
            # write and close only when the open succeeded; 'f' is unbound otherwise
            f.write(jsonify(value))
            f.close()
Example #29
    def emit(self, record):
        mitogen_name = getattr(record, 'mitogen_name', '')
        if mitogen_name == 'stderr':
            record.levelno = logging.ERROR
        if mitogen_name in self.NOISY_LOGGERS and record.levelno >= logging.WARNING:
            record.levelno = logging.DEBUG

        s = '[pid %d] %s' % (os.getpid(), self.format(record))
        if record.levelno >= logging.ERROR:
            display.error(s, wrap_text=False)
        elif record.levelno >= logging.WARNING:
            display.warning(s, formatted=True)
        else:
            self.normal_method(s)
Example #30
    def __init__(self, *args, **kwargs):

        self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self._cache = {}
        self._cache_dir = os.path.expanduser(os.path.expandvars(C.CACHE_PLUGIN_CONNECTION)) # expects a dir path
        if not self._cache_dir:
            raise AnsibleError("error, fact_caching_connection is not set, cannot use fact cache")

        if not os.path.exists(self._cache_dir):
            try:
                os.makedirs(self._cache_dir)
            except (OSError,IOError) as e:
                display.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, to_bytes(e)))
                return None
Example #31
    def make_become_cmd(self, cmd, executable=None):
        """ helper function to create privilege escalation commands """

        prompt      = None
        success_key = None
        self.prompt = None

        if self.become:

            if not executable:
                executable = self.executable

            becomecmd   = None
            randbits    = ''.join(random.choice(string.ascii_lowercase) for x in range(32))
            success_key = 'BECOME-SUCCESS-%s' % randbits
            success_cmd = shlex_quote('echo %s; %s' % (success_key, cmd))

            if executable:
                command = '%s -c %s' % (executable, success_cmd)
            else:
                command = success_cmd

            # set executable to use for the privilege escalation method, with various overrides
            exe = self.become_exe or \
                  getattr(self, '%s_exe' % self.become_method, None) or \
                  C.DEFAULT_BECOME_EXE or \
                  getattr(C, 'DEFAULT_%s_EXE' % self.become_method.upper(), None) or \
                  self.become_method

            # set flags to use for the privilege escalation method, with various overrides
            flags = self.become_flags or \
                    getattr(self, '%s_flags' % self.become_method, None) or \
                    C.DEFAULT_BECOME_FLAGS or \
                    getattr(C, 'DEFAULT_%s_FLAGS' % self.become_method.upper(), None) or \
                    ''

            if self.become_method == 'sudo':
                # If we have a password, we run sudo with a randomly-generated
                # prompt set using -p. Otherwise we run it with default -n, which makes
                # it fail if it would have prompted for a password.
                # Cannot rely on -n as it can be removed from defaults, which should be
                # done for older versions of sudo that do not support the option.
                #
                # Passing a quoted compound command to sudo (or sudo -s)
                # directly doesn't work, so we shellquote it with shlex_quote()
                # and pass the quoted string to the user's shell.

                # force quick error if password is required but not supplied, should prevent sudo hangs.
                if self.become_pass:
                    prompt = '[sudo via ansible, key=%s] password: ' % randbits
                    becomecmd = '%s %s -p "%s" -u %s %s' % (exe, flags.replace('-n', ''), prompt, self.become_user, command)
                else:
                    becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, command)


            elif self.become_method == 'su':

                # passing code ref to examine prompt as simple string comparison isn't good enough with su
                def detect_su_prompt(b_data):
                    b_password_string = b"|".join([b'(\w+\'s )?' + x for x in b_SU_PROMPT_LOCALIZATIONS])
                    # Colon or unicode fullwidth colon
                    b_password_string = b_password_string + to_bytes(u' ?(:|:) ?')
                    b_SU_PROMPT_LOCALIZATIONS_RE = re.compile(b_password_string, flags=re.IGNORECASE)
                    return bool(b_SU_PROMPT_LOCALIZATIONS_RE.match(b_data))
                prompt = detect_su_prompt

                becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, shlex_quote(command))

            elif self.become_method == 'pbrun':

                prompt = 'Password:'
                becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, success_cmd)

            elif self.become_method == 'ksu':
                def detect_ksu_prompt(b_data):
                    return re.match(b"Kerberos password for .*@.*:", b_data)

                prompt = detect_ksu_prompt
                becomecmd = '%s %s %s -e %s' % (exe, self.become_user, flags, command)

            elif self.become_method == 'pfexec':

                # No user as it uses its own exec_attr to figure it out
                becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)

            elif self.become_method == 'runas':
                # become is handled inside the WinRM connection plugin
                display.warning("The Windows 'runas' become method is experimental, and may change significantly in future Ansible releases.")

                if not self.become_user:
                    raise AnsibleError(("The 'runas' become method requires a username "
                                        "(specify with the '--become-user' CLI arg, the 'become_user' keyword, or the 'ansible_become_user' variable)"))
                if not self.become_pass:
                    raise AnsibleError(("The 'runas' become method requires a password "
                                       "(specify with the '-K' CLI arg or the 'ansible_become_password' variable)"))
                becomecmd = cmd

            elif self.become_method == 'doas':

                prompt = 'doas (%s@' % self.remote_user
                exe = self.become_exe or 'doas'

                if not self.become_pass:
                    flags += ' -n '

                if self.become_user:
                    flags += ' -u %s ' % self.become_user

                #FIXME: make shell independent
                becomecmd = '%s %s echo %s && %s %s env ANSIBLE=true %s' % (exe, flags, success_key, exe, flags, cmd)

            elif self.become_method == 'dzdo':

                exe = self.become_exe or 'dzdo'
                if self.become_pass:
                    prompt = '[dzdo via ansible, key=%s] password: ' % randbits
                    becomecmd = '%s -p %s -u %s %s' % (exe, shlex_quote(prompt), self.become_user, command)
                else:
                    becomecmd = '%s -u %s %s' % (exe, self.become_user, command)

            else:
                raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)

            if self.become_pass:
                self.prompt = prompt
            self.success_key = success_key
            return becomecmd

        return cmd
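
For intuition, the sudo branch above assembles one wrapped command string. A sketch with placeholder values showing its final shape:

exe, flags, user = 'sudo', '-H -S', 'root'
randbits = 'abcdefgh'  # stands in for the random key generated above
prompt = '[sudo via ansible, key=%s] password: ' % randbits
command = "/bin/sh -c 'echo BECOME-SUCCESS-%s; whoami'" % randbits
becomecmd = '%s %s -p "%s" -u %s %s' % (exe, flags.replace('-n', ''), prompt, user, command)
print(becomecmd)
# sudo -H -S -p "[sudo via ansible, key=abcdefgh] password: " -u root /bin/sh -c 'echo BECOME-SUCCESS-abcdefgh; whoami'
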
Example #32
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        #task_vars = self._job_vars.copy()
        task_vars = self._job_vars

        loop_var = 'item'
        label = None
        loop_pause = 0
        if self._task.loop_control:
            # the value may be 'None', so we still need to default it back to 'item'
            loop_var = self._task.loop_control.loop_var or 'item'
            label = self._task.loop_control.label or ('{{' + loop_var + '}}')
            loop_pause = self._task.loop_control.pause or 0

        if loop_var in task_vars:
            display.warning(
                u"The loop variable '%s' is already in use. "
                u"You should set the `loop_var` value in the `loop_control` option for the task"
                u" to something else to avoid variable collisions and unexpected behavior."
                % loop_var)

        ran_once = False
        items = self._squash_items(items, loop_var, task_vars)
        for item in items:
            task_vars[loop_var] = item

            # pause between loop iterations
            if loop_pause and ran_once:
                time.sleep(loop_pause)
            else:
                ran_once = True

            try:
                tmp_task = self._task.copy(exclude_parent=True,
                                           exclude_tasks=True)
                tmp_task._parent = self._task._parent
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=to_text(e)))
                continue

            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context,
                                                      self._play_context)
            res = self._execute(variables=task_vars)
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context,
                                                      self._play_context)

            # now update the result with the item info, and append the result
            # to the list of results
            res[loop_var] = item
            res['_ansible_item_result'] = True

            if label is not None:
                templar = Templar(loader=self._loader,
                                  shared_loader_obj=self._shared_loader_obj,
                                  variables=self._job_vars)
                res['_ansible_item_label'] = templar.template(label)

            self._rslt_q.put(
                TaskResult(
                    self._host.name,
                    self._task._uuid,
                    res,
                    task_fields=self._task.dump_attrs(),
                ),
                block=False,
            )
            results.append(res)
            del task_vars[loop_var]

        return results
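The loop_var defaulting and collision warning above can be isolated into a small sketch. This is a minimal standalone version, assuming a loop_control object with an optional loop_var attribute; resolve_loop_var is a hypothetical helper and print stands in for Ansible's display.warning:

def resolve_loop_var(loop_control, task_vars):
    '''Return the loop variable name, warning when it would shadow an existing variable.'''
    # the value may be None, so default it back to 'item'
    loop_var = 'item'
    if loop_control is not None:
        loop_var = getattr(loop_control, 'loop_var', None) or 'item'
    if loop_var in task_vars:
        # stand-in for display.warning
        print("WARNING: the loop variable %r is already in use; "
              "set loop_control.loop_var to avoid collisions" % loop_var)
    return loop_var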
Example #33
    def find_plugin(self,
                    name,
                    mod_type='',
                    ignore_deprecated=False,
                    check_aliases=False):
        ''' Find a plugin named name '''

        global _PLUGIN_FILTERS
        if name in _PLUGIN_FILTERS[self.package]:
            return None

        if mod_type:
            suffix = mod_type
        elif self.class_name:
            # Ansible plugins that run in the controller process (most plugins)
            suffix = '.py'
        else:
            # Only Ansible Modules.  Ansible modules can be any executable so
            # they can have any suffix
            suffix = ''

        if check_aliases:
            name = self.aliases.get(name, name)

        # The particular cache to look for modules within.  This matches the
        # requested mod_type
        pull_cache = self._plugin_path_cache[suffix]
        try:
            return pull_cache[name]
        except KeyError:
            # Cache miss.  Now let's find the plugin
            pass

        # TODO: Instead of using the self._paths cache (PATH_CACHE) and
        #       self._searched_paths we could use an iterator.  Before enabling that
        #       we need to make sure we don't want to add additional directories
        #       (add_directory()) once we start using the iterator.  Currently, it
        #       looks like _get_paths() never forces a cache refresh so if we expect
        #       additional directories to be added later, it is buggy.
        for path in (p for p in self._get_paths()
                     if p not in self._searched_paths and os.path.isdir(p)):
            try:
                full_paths = (os.path.join(path, f) for f in os.listdir(path))
            except OSError as e:
                display.warning("Error accessing plugin paths: %s" %
                                to_text(e))
                # without this, full_paths would be undefined (or stale) below
                continue

            for full_path in (
                    f for f in full_paths
                    if os.path.isfile(f) and not f.endswith('__init__.py')):
                full_name = os.path.basename(full_path)

                # HACK: We have no way of executing python byte compiled files as ansible modules so specifically exclude them
                # FIXME: I believe this is only correct for modules and module_utils.
                # For all other plugins, .pyc and .pyo should be valid
                if full_path.endswith(('.pyc', '.pyo')):
                    continue

                splitname = os.path.splitext(full_name)
                base_name = splitname[0]
                try:
                    extension = splitname[1]
                except IndexError:
                    extension = ''

                # Module found, now enter it into the caches that match this file
                if base_name not in self._plugin_path_cache['']:
                    self._plugin_path_cache[''][base_name] = full_path

                if full_name not in self._plugin_path_cache['']:
                    self._plugin_path_cache[''][full_name] = full_path

                if base_name not in self._plugin_path_cache[extension]:
                    self._plugin_path_cache[extension][base_name] = full_path

                if full_name not in self._plugin_path_cache[extension]:
                    self._plugin_path_cache[extension][full_name] = full_path

            self._searched_paths.add(path)
            try:
                return pull_cache[name]
            except KeyError:
                # Didn't find the plugin in this directory. Load modules from the next one
                pass

        # if nothing is found, try finding alias/deprecated
        if not name.startswith('_'):
            alias_name = '_' + name
            # We've already cached all the paths at this point
            if alias_name in pull_cache:
                if not ignore_deprecated and not os.path.islink(
                        pull_cache[alias_name]):
                    # FIXME: this is not always the case, some are just aliases
                    display.deprecated(
                        '%s is kept for backwards compatibility but usage is discouraged. '
                        'The module documentation details page may explain more about this rationale.'
                        % name.lstrip('_'))
                return pull_cache[alias_name]

        return None
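The caching strategy above is easier to see in isolation: one dictionary per suffix (the mod_type), each mapping both the base name and the full file name to the plugin's path, so lookups succeed with or without an extension. A minimal sketch, assuming plain directories of plugin files:

import os
from collections import defaultdict

plugin_path_cache = defaultdict(dict)  # suffix -> {name: full_path}

def cache_plugin_dir(path):
    # mirror the loop above: skip byte-compiled files and __init__.py,
    # then record each plugin under both its base name and full name
    for full_name in os.listdir(path):
        if full_name == '__init__.py' or full_name.endswith(('.pyc', '.pyo')):
            continue
        full_path = os.path.join(path, full_name)
        if not os.path.isfile(full_path):
            continue
        base_name, extension = os.path.splitext(full_name)
        for suffix in ('', extension):
            plugin_path_cache[suffix].setdefault(base_name, full_path)
            plugin_path_cache[suffix].setdefault(full_name, full_path)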
Example #34
    def parse_inventory(self, host_list):

        if isinstance(host_list, string_types):
            if "," in host_list:
                host_list = host_list.split(",")
                host_list = [ h for h in host_list if h and h.strip() ]

        self.parser = None

        # Always create the 'all' and 'ungrouped' groups, even if host_list is
        # empty: in this case we will subsequently add the implicit 'localhost' to it.

        ungrouped = Group('ungrouped')
        all = Group('all')
        all.add_child_group(ungrouped)
        base_groups = frozenset([all, ungrouped])

        self.groups = dict(all=all, ungrouped=ungrouped)

        if host_list is None:
            pass
        elif isinstance(host_list, list):
            for h in host_list:
                try:
                    (host, port) = parse_address(h, allow_ranges=False)
                except AnsibleError as e:
                    display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
                    host = h
                    port = None

                new_host = Host(host, port)
                if h in C.LOCALHOST:
                    # set default localhost from inventory to avoid creating an implicit one. Last localhost defined 'wins'.
                    if self.localhost is not None:
                        display.warning("A duplicate localhost-like entry was found (%s). First found localhost was %s" % (h, self.localhost.name))
                    display.vvvv("Set default localhost to %s" % h)
                    self.localhost = new_host
                all.add_host(new_host)
        elif self._loader.path_exists(host_list):
            # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
            if self.is_directory(host_list):
                # Ensure basedir is inside the directory
                host_list = os.path.join(self.host_list, "")
                self.parser = InventoryDirectory(loader=self._loader, groups=self.groups, filename=host_list)
            else:
                self.parser = get_file_parser(host_list, self.groups, self._loader)
                vars_loader.add_directory(self._basedir, with_subdir=True)

            if not self.parser:
                # should never happen, but just in case
                raise AnsibleError("Unable to parse %s as an inventory source" % host_list)
        else:
            display.warning("Host file not found: %s" % to_text(host_list))

        self._vars_plugins = [ x for x in vars_loader.all(self) ]

        ### POST PROCESS groups and hosts after specific parser was invoked

        group_names = set()
        # set group vars from group_vars/ files and vars plugins
        for g in self.groups:
            group = self.groups[g]
            group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
            self.get_group_vars(group)
            group_names.add(group.name)

        host_names = set()
        # get host vars from host_vars/ files and vars plugins
        for host in self.get_hosts(ignore_limits=True, ignore_restrictions=True):
            host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
            self.get_host_vars(host)
            host_names.add(host.name)

            mygroups = host.get_groups()

            # ensure hosts are always in 'all'
            if all not in mygroups:
                all.add_host(host)

            if ungrouped in mygroups:
                # clear ungrouped of any hosts the parser stored there incorrectly
                if set(mygroups).difference(base_groups):
                    host.remove_group(ungrouped)
            else:
                # add ungrouped hosts to ungrouped
                length = len(mygroups)
                if length == 0 or (length == 1 and all in mygroups):
                    ungrouped.add_host(host)

        # warn if overloading identifier as both group and host
        for conflict in group_names.intersection(host_names):
            display.warning("Found both group and host with same name: %s" % conflict)
Example #35
    def all(self, *args, **kwargs):
        ''' instantiates all plugins with the same arguments '''

        global _PLUGIN_FILTERS

        path_only = kwargs.pop('path_only', False)
        class_only = kwargs.pop('class_only', False)
        all_matches = []
        found_in_cache = True

        for i in self._get_paths():
            all_matches.extend(glob.glob(os.path.join(i, "*.py")))

        for path in sorted(all_matches, key=os.path.basename):
            name = os.path.basename(os.path.splitext(path)[0])

            if '__init__' in name or name in _PLUGIN_FILTERS[self.package]:
                continue

            if path_only:
                yield path
                continue

            if path not in self._module_cache:
                module = self._load_module_source(name, path)
                if module in self._module_cache.values():
                    # In ``_load_module_source``, if a plugin has a duplicate name we just
                    # return the previously matched plugin from sys.modules, so you never get
                    # both, just one cached under both paths. This is normally harmless, except
                    # with callbacks, where the single callback would run twice. Reject duplicates here.
                    continue
                self._module_cache[path] = module
                found_in_cache = False

            try:
                obj = getattr(self._module_cache[path], self.class_name)
            except AttributeError as e:
                display.warning(
                    "Skipping plugin (%s) as it seems to be invalid: %s" %
                    (path, to_text(e)))
                continue

            if self.base_class:
                # The import path is hardcoded and should be the right place,
                # so we are not expecting an ImportError.
                module = __import__(self.package, fromlist=[self.base_class])
                # Check whether this obj has the required base class.
                try:
                    plugin_class = getattr(module, self.base_class)
                except AttributeError:
                    continue
                if not issubclass(obj, plugin_class):
                    continue

            self._display_plugin_load(self.class_name,
                                      name,
                                      self._searched_paths,
                                      path,
                                      found_in_cache=found_in_cache,
                                      class_only=class_only)
            if not class_only:
                try:
                    obj = obj(*args, **kwargs)
                except TypeError as e:
                    display.warning(
                        "Skipping plugin (%s) as it seems to be incomplete: %s"
                        % (path, to_text(e)))
                    # actually skip it, rather than yielding the bare class
                    continue

            self._update_object(obj, name, path)
            yield obj
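Stripped of caching and base-class checks, the discovery loop reduces to globbing every configured path for *.py files and yielding them in a stable order. A minimal sketch:

import glob
import os

def iter_plugin_paths(paths, filters=frozenset()):
    # collect candidates from every path, then walk them sorted by basename
    matches = []
    for p in paths:
        matches.extend(glob.glob(os.path.join(p, '*.py')))
    for path in sorted(matches, key=os.path.basename):
        name = os.path.basename(os.path.splitext(path)[0])
        if '__init__' in name or name in filters:
            continue
        yield name, path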
Example #36
    def run(self, tmp=None, task_vars=None):
        ''' run the pause action module '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        duration_unit = 'minutes'
        prompt = None
        seconds = None
        echo = True
        echo_prompt = ''
        result.update(
            dict(changed=False,
                 rc=0,
                 stderr='',
                 stdout='',
                 start=None,
                 stop=None,
                 delta=None,
                 echo=echo))

        if not set(self._task.args.keys()) <= set(self.PAUSE_TYPES):
            result['failed'] = True
            result[
                'msg'] = "Invalid argument given. Must be one of: %s" % ", ".join(
                    self.PAUSE_TYPES)
            return result

        # Should keystrokes be echoed to stdout?
        if 'echo' in self._task.args:
            try:
                echo = boolean(self._task.args['echo'])
            except TypeError as e:
                result['failed'] = True
                result['msg'] = to_native(e)
                return result

            # Add a note saying the output is hidden if echo is disabled
            if not echo:
                echo_prompt = ' (output is hidden)'

        # Is 'prompt' a key in 'args'?
        if 'prompt' in self._task.args:
            prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(),
                                      self._task.args['prompt'], echo_prompt)
        else:
            # If no custom prompt is specified, set a default prompt
            prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(
            ), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)

        # Are 'minutes' or 'seconds' present in 'args'?
        if 'minutes' in self._task.args or 'seconds' in self._task.args:
            try:
                if 'minutes' in self._task.args:
                    # The time() command operates in seconds so we need to
                    # recalculate for minutes=X values.
                    seconds = int(self._task.args['minutes']) * 60
                else:
                    seconds = int(self._task.args['seconds'])
                    duration_unit = 'seconds'

            except ValueError as e:
                result['failed'] = True
                result[
                    'msg'] = u"non-integer value given for prompt duration:\n%s" % to_text(
                        e)
                return result

        ########################################################################
        # Begin the hard work!

        start = time.time()
        result['start'] = to_text(datetime.datetime.now())
        result['user_input'] = b''

        stdin_fd = None
        old_settings = None
        try:
            if seconds is not None:
                if seconds < 1:
                    seconds = 1

                # setup the alarm handler
                signal.signal(signal.SIGALRM, timeout_handler)
                signal.alarm(seconds)

                # show the timer and control prompts
                display.display("Pausing for %d seconds%s" %
                                (seconds, echo_prompt))
                display.display(
                    "(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"
                ),

                # show the prompt specified in the task
                if 'prompt' in self._task.args:
                    display.display(prompt)

            else:
                display.display(prompt)

            # save the attributes on the existing (duped) stdin so
            # that we can restore them later after we set raw mode
            stdin_fd = None
            stdout_fd = None
            try:
                if PY3:
                    stdin = self._connection._new_stdin.buffer
                    stdout = sys.stdout.buffer
                else:
                    stdin = self._connection._new_stdin
                    stdout = sys.stdout
                stdin_fd = stdin.fileno()
                stdout_fd = stdout.fileno()
            except (ValueError, AttributeError):
                # ValueError: someone is using a closed file descriptor as stdin
                # AttributeError: someone is using a null file descriptor as stdin on windows
                stdin = None

            if stdin_fd is not None:
                if isatty(stdin_fd):
                    # grab actual Ctrl+C sequence
                    try:
                        intr = termios.tcgetattr(stdin_fd)[6][termios.VINTR]
                    except Exception:
                        # unsupported/not present, use default
                        intr = b'\x03'  # value for Ctrl+C

                    # get backspace sequences
                    try:
                        backspace = termios.tcgetattr(stdin_fd)[6][
                            termios.VERASE]
                    except Exception:
                        backspace = [b'\x7f', b'\x08']

                    old_settings = termios.tcgetattr(stdin_fd)
                    tty.setraw(stdin_fd)

                    # Only set stdout to raw mode if it is a TTY. This is needed when redirecting
                    # stdout to a file since a file cannot be set to raw mode.
                    if isatty(stdout_fd):
                        tty.setraw(stdout_fd)

                    # Only echo input if no timeout is specified
                    if not seconds and echo:
                        new_settings = termios.tcgetattr(stdin_fd)
                        new_settings[3] = new_settings[3] | termios.ECHO
                        termios.tcsetattr(stdin_fd, termios.TCSANOW,
                                          new_settings)

                    # flush the buffer to make sure no previous key presses
                    # are read in below
                    termios.tcflush(stdin, termios.TCIFLUSH)

            while True:

                try:
                    if stdin_fd is not None:

                        key_pressed = stdin.read(1)

                        if key_pressed == intr:  # value for Ctrl+C
                            clear_line(stdout)
                            raise KeyboardInterrupt

                    if not seconds:
                        if stdin_fd is None or not isatty(stdin_fd):
                            display.warning(
                                "Not waiting for response to prompt as stdin is not interactive"
                            )
                            break

                        # read key presses and act accordingly
                        if key_pressed in (b'\r', b'\n'):
                            clear_line(stdout)
                            break
                        elif key_pressed in backspace:
                            # delete a character if backspace is pressed
                            result['user_input'] = result['user_input'][:-1]
                            clear_line(stdout)
                            if echo:
                                stdout.write(result['user_input'])
                            stdout.flush()
                        else:
                            result['user_input'] += key_pressed

                except KeyboardInterrupt:
                    signal.alarm(0)
                    display.display(
                        "Press 'C' to continue the play or 'A' to abort \r"),
                    if self._c_or_a(stdin):
                        clear_line(stdout)
                        break

                    clear_line(stdout)

                    raise AnsibleError('user requested abort!')

        except AnsibleTimeoutExceeded:
            # this is the exception we expect when the alarm signal
            # fires, so we simply ignore it to move into the cleanup
            pass
        finally:
            # cleanup and save some information
            # restore the old settings for the duped stdin stdin_fd
            if not (None in (stdin_fd, old_settings)) and isatty(stdin_fd):
                termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_settings)

            duration = time.time() - start
            result['stop'] = to_text(datetime.datetime.now())
            result['delta'] = int(duration)

            if duration_unit == 'minutes':
                duration = round(duration / 60.0, 2)
            else:
                duration = round(duration, 2)
            result['stdout'] = "Paused for %s %s" % (duration, duration_unit)

        result['user_input'] = to_text(result['user_input'],
                                       errors='surrogate_or_strict')
        return result
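The core terminal trick above is putting stdin into raw mode, reading a single byte, and restoring the saved settings afterwards. A minimal POSIX-only sketch; it assumes stdin is a real tty (termios.tcgetattr raises termios.error otherwise):

import os
import sys
import termios
import tty

def read_one_key(fd=None):
    fd = sys.stdin.fileno() if fd is None else fd
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        return os.read(fd, 1)  # one raw byte, no line buffering or echo
    finally:
        # always restore the terminal, as in the finally block above
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)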
Example #37
    def get_plugin_list_text(self, loader):
        columns = display.columns
        displace = max(len(x) for x in self.plugin_list)
        linelimit = columns - displace - 5
        text = []
        deprecated = []
        for plugin in sorted(self.plugin_list):

            try:
                # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
                filename = loader.find_plugin(plugin,
                                              mod_type='.py',
                                              ignore_deprecated=True,
                                              check_aliases=True)

                if filename is None:
                    continue
                if filename.endswith(".ps1"):
                    continue
                if os.path.isdir(filename):
                    continue

                doc = None
                try:
                    doc = read_docstub(filename)
                except Exception:
                    display.warning("%s has a documentation formatting error" %
                                    plugin)
                    continue

                if not doc or not isinstance(doc, dict):
                    with open(filename) as f:
                        metadata = extract_metadata(module_data=f.read())
                    if metadata[0]:
                        if 'removed' not in metadata[0].get('status', []):
                            display.warning(
                                "%s parsing did not produce documentation." %
                                plugin)
                        else:
                            continue
                    desc = 'UNDOCUMENTED'
                else:
                    desc = self.tty_ify(
                        doc.get('short_description',
                                'INVALID SHORT DESCRIPTION').strip())

                if len(desc) > linelimit:
                    desc = desc[:linelimit] + '...'

                if plugin.startswith('_'):  # Handle deprecated
                    deprecated.append(
                        "%-*s %-*.*s" %
                        (displace, plugin[1:], linelimit, len(desc), desc))
                else:
                    text.append("%-*s %-*.*s" %
                                (displace, plugin, linelimit, len(desc), desc))
            except Exception as e:
                raise AnsibleError("Failed reading docs at %s: %s" %
                                   (plugin, to_native(e)),
                                   orig_exc=e)

        if len(deprecated) > 0:
            text.append("\nDEPRECATED:")
            text.extend(deprecated)
        return "\n".join(text)
    def _process_pending_results(self,
                                 iterator,
                                 one_pass=False,
                                 max_passes=None):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        def get_original_host(host_name):
            host_name = to_text(host_name)
            if host_name in self._inventory._hosts_cache:
                return self._inventory._hosts_cache[host_name]
            else:
                return self._inventory.get_host(host_name)

        def search_handler_blocks_by_name(handler_name, handler_blocks):
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    if handler_task.name:
                        handler_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            task=handler_task)
                        templar = Templar(loader=self._loader,
                                          variables=handler_vars)
                        try:
                            # first we check with the full result of get_name(), which may
                            # include the role name (if the handler is from a role). If that
                            # is not found, we resort to the simple name field, which doesn't
                            # have anything extra added to it.
                            target_handler_name = templar.template(
                                handler_task.name)
                            if target_handler_name == handler_name:
                                return handler_task
                            else:
                                target_handler_name = templar.template(
                                    handler_task.get_name())
                                if target_handler_name == handler_name:
                                    return handler_task
                        except (UndefinedError, AnsibleUndefinedVariable):
                            # We skip this handler due to the fact that it may be using
                            # a variable in the name that was conditionally included via
                            # set_fact or some other method, and we don't want to error
                            # out unnecessarily
                            continue
            return None

        def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    if handler_uuid == handler_task._uuid:
                        return handler_task
            return None

        def parent_handler_match(target_handler, handler_name):
            if target_handler:
                if isinstance(target_handler, (TaskInclude, IncludeRole)):
                    try:
                        handler_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            task=target_handler)
                        templar = Templar(loader=self._loader,
                                          variables=handler_vars)
                        target_handler_name = templar.template(
                            target_handler.name)
                        if target_handler_name == handler_name:
                            return True
                        else:
                            target_handler_name = templar.template(
                                target_handler.get_name())
                            if target_handler_name == handler_name:
                                return True
                    except (UndefinedError, AnsibleUndefinedVariable):
                        pass
                return parent_handler_match(target_handler._parent,
                                            handler_name)
            else:
                return False

        # a Templar class to use for templating things later, as we're using
        # original/non-validated objects here on the manager side. We set the
        # variables in use later inside the loop below
        templar = Templar(loader=self._loader)

        cur_pass = 0
        while True:
            try:
                self._results_lock.acquire()
                task_result = self._results.pop()
            except IndexError:
                break
            finally:
                self._results_lock.release()

            # get the original host and task.  We then assign them to the TaskResult for use in callbacks/etc.
            original_host = get_original_host(task_result._host)
            original_task = iterator.get_original_task(original_host,
                                                       task_result._task)

            task_result._host = original_host
            task_result._task = original_task

            # get the correct loop var for use later
            if original_task.loop_control:
                loop_var = original_task.loop_control.loop_var or 'item'
            else:
                loop_var = 'item'

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed',
                                            task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped',
                                            task_result)
                else:
                    if 'diff' in task_result._result:
                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff',
                                                    task_result)
                    self._tqm.send_callback('v2_runner_item_on_ok',
                                            task_result)
                continue

            # get the vars for this task/host pair, make them the active set of vars for our templar above
            task_vars = self._variable_manager.get_vars(loader=self._loader,
                                                        play=iterator._play,
                                                        host=original_host,
                                                        task=original_task)
            self.add_tqm_variables(task_vars, play=iterator._play)
            templar.set_available_variables(task_vars)

            run_once = templar.template(original_task.run_once)
            if original_task.register:
                host_list = self.get_task_hosts(iterator, original_host,
                                                original_task)

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(
                        target_host, {original_task.register: clean_copy})

            # all host status messages contain 2 entries: (msg, task_result)
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                ignore_errors = templar.template(original_task.ignore_errors)
                if not ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if run_once:
                        # if we're using run_once, we have to fail every host here
                        for h in self._inventory.get_hosts(
                                iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                state, _ = iterator.get_next_task_for_host(
                                    h, peek=True)
                                iterator.mark_host_failed(h)
                                state, new_task = iterator.get_next_task_for_host(
                                    h, peek=True)
                    else:
                        iterator.mark_host_failed(original_host)

                    # increment the failed count for this host
                    self._tqm._stats.increment('failures', original_host.name)

                    # grab the current state and if we're iterating on the rescue portion
                    # of a block then we save the failed task in a special var for use
                    # within the rescue/always
                    state, _ = iterator.get_next_task_for_host(original_host,
                                                               peek=True)

                    if iterator.is_failed(
                            original_host
                    ) and state and state.run_state == iterator.ITERATING_COMPLETE:
                        self._tqm._failed_hosts[original_host.name] = True

                    if state and state.run_state == iterator.ITERATING_RESCUE:
                        self._variable_manager.set_nonpersistent_facts(
                            original_host,
                            dict(
                                ansible_failed_task=original_task.serialize(),
                                ansible_failed_result=task_result._result,
                            ),
                        )
                else:
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result[
                            'changed']:
                        self._tqm._stats.increment('changed',
                                                   original_host.name)
                self._tqm.send_callback('v2_runner_on_failed',
                                        task_result,
                                        ignore_errors=ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                iterator._play._removed_hosts.append(original_host.name)
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable',
                                        task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                found = False
                                # Find the handler using the above helper.  First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                target_handler = search_handler_blocks_by_name(
                                    handler_name, iterator._play.handlers)
                                if target_handler is not None:
                                    found = True
                                    if original_host not in self._notified_handlers[
                                            target_handler._uuid]:
                                        self._notified_handlers[
                                            target_handler._uuid].append(
                                                original_host)
                                        # FIXME: should this be a callback?
                                        display.vv("NOTIFIED HANDLER %s" %
                                                   (handler_name, ))
                                else:
                                    # there may be more than one handler with the notified name as the
                                    # parent, so we just keep track of whether or not we found one at all
                                    for target_handler_uuid in self._notified_handlers:
                                        target_handler = search_handler_blocks_by_uuid(
                                            target_handler_uuid,
                                            iterator._play.handlers)
                                        if target_handler and parent_handler_match(
                                                target_handler, handler_name):
                                            self._notified_handlers[
                                                target_handler._uuid].append(
                                                    original_host)
                                            display.vv(
                                                "NOTIFIED HANDLER %s" %
                                                (target_handler.get_name(), ))
                                            found = True

                                if handler_name in self._listening_handlers:
                                    for listening_handler_uuid in self._listening_handlers[
                                            handler_name]:
                                        listening_handler = search_handler_blocks_by_uuid(
                                            listening_handler_uuid,
                                            iterator._play.handlers)
                                        if listening_handler is not None:
                                            found = True
                                        else:
                                            continue
                                        if original_host not in self._notified_handlers[
                                                listening_handler._uuid]:
                                            self._notified_handlers[
                                                listening_handler.
                                                _uuid].append(original_host)
                                            display.vv(
                                                "NOTIFIED HANDLER %s" %
                                                (listening_handler.get_name(),
                                                 ))

                                # and if none were found, then we raise an error
                                if not found:
                                    msg = "The requested handler '%s' was not found in either the main handlers list nor in the listening handlers list" % handler_name
                                    if C.ERROR_ON_MISSING_HANDLER:
                                        raise AnsibleError(msg)
                                    else:
                                        display.warning(msg)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)

                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)

                    if 'ansible_facts' in result_item:

                        # if delegated fact and we are delegating facts, we need to change target host for them
                        if original_task.delegate_to is not None and original_task.delegate_facts:
                            item = result_item.get(loop_var, None)
                            if item is not None:
                                task_vars[loop_var] = item
                            host_name = templar.template(
                                original_task.delegate_to)
                            actual_host = self._inventory.get_host(host_name)
                            if actual_host is None:
                                actual_host = Host(name=host_name)
                        else:
                            actual_host = original_host

                        host_list = self.get_task_hosts(
                            iterator, actual_host, original_task)
                        if original_task.action == 'include_vars':

                            for (var_name, var_value) in iteritems(
                                    result_item['ansible_facts']):
                                # find the host we're actually referring to here, which may
                                # be a host that is not really in inventory at all
                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(
                                        target_host, var_name, var_value)
                        else:
                            for target_host in host_list:
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(
                                        target_host,
                                        result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(
                                        target_host,
                                        result_item['ansible_facts'].copy())

                    if 'ansible_stats' in result_item and 'data' in result_item[
                            'ansible_stats'] and result_item['ansible_stats'][
                                'data']:

                        if 'per_host' not in result_item[
                                'ansible_stats'] or result_item[
                                    'ansible_stats']['per_host']:
                            host_list = self.get_task_hosts(
                                iterator, original_host, original_task)
                        else:
                            host_list = [None]

                        data = result_item['ansible_stats']['data']
                        aggregate = 'aggregate' in result_item[
                            'ansible_stats'] and result_item['ansible_stats'][
                                'aggregate']
                        for myhost in host_list:
                            for k in data.keys():
                                if aggregate:
                                    self._tqm._stats.update_custom_stats(
                                        k, data[k], myhost)
                                else:
                                    self._tqm._stats.set_custom_stats(
                                        k, data[k], myhost)

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if original_task.action not in ['include', 'include_role']:
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result[
                            'changed']:
                        self._tqm._stats.increment('changed',
                                                   original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran:  #TODO:  and original_task.action != 'include_role':?
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[
                        original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

            if one_pass or max_passes is not None and (cur_pass +
                                                       1) >= max_passes:
                break

            cur_pass += 1

        return ret_results
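The drain loop at the top of this method, popping results under a lock until the shared list is empty while honoring one_pass and max_passes, can be sketched on its own. Here handle is a hypothetical caller-supplied callback standing in for the per-result processing above:

import threading

results = []
results_lock = threading.Lock()

def drain_results(handle, one_pass=False, max_passes=None):
    processed = []
    cur_pass = 0
    while True:
        # pop one result under the lock, stopping when the list is empty
        with results_lock:
            if not results:
                break
            task_result = results.pop()
        processed.append(handle(task_result))
        if one_pass or (max_passes is not None and cur_pass + 1 >= max_passes):
            break
        cur_pass += 1
    return processed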
Example #39
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            # guard against the default, since task_vars.keys() is used below
            task_vars = dict()
        suffix_to_merge = self._task.args.get('suffix_to_merge', '')
        merged_var_name = self._task.args.get('merged_var_name', '')
        dedup = self._task.args.get('dedup', True)
        expected_type = self._task.args.get('expected_type')
        cacheable = bool(self._task.args.get('cacheable', False))
        recursive_dict_merge = bool(
            self._task.args.get('recursive_dict_merge', False))
        all_keys = task_vars.keys()

        # Validate args
        if expected_type not in ['dict', 'list']:
            raise AnsibleError("expected_type must be set ('dict' or 'list').")
        if not merged_var_name:
            raise AnsibleError("merged_var_name must be set")
        if not isidentifier(merged_var_name):
            raise AnsibleError(
                "merged_var_name '%s' is not a valid identifier" %
                merged_var_name)
        if not suffix_to_merge.endswith('__to_merge'):
            raise AnsibleError(
                "Merge suffix must end with '__to_merge', sorry!")
        if merged_var_name in all_keys:
            warning = "{} is already defined, are you sure you want to overwrite it?"
            display.warning(warning.format(merged_var_name))
            display.v("The contents of {} are: {}".format(
                merged_var_name, task_vars[merged_var_name]))

        keys = sorted(
            [key for key in task_vars.keys() if key.endswith(suffix_to_merge)])

        display.v("Merging vars in this order: {}".format(keys))

        # We need to render any jinja in the merged var now, because once it
        # leaves this plugin, ansible will cleanse it by turning any jinja tags
        # into comments.
        # And we need it done before merging the variables,
        # in case any structured data is specified with templates.
        merge_vals = [self._templar.template(task_vars[key]) for key in keys]

        # Dispatch based on type that we're merging
        if merge_vals == []:
            if expected_type == 'list':
                merged = []
            else:
                merged = {}  # pylint: disable=redefined-variable-type
        elif isinstance(merge_vals[0], list):
            merged = merge_list(merge_vals, dedup)
        elif isinstance(merge_vals[0], dict):
            merged = merge_dict(merge_vals, dedup, recursive_dict_merge)
        else:
            raise AnsibleError(
                "Don't know how to merge variables of type: {}".format(
                    type(merge_vals[0])))

        return {
            'ansible_facts': {
                merged_var_name: merged
            },
            'ansible_facts_cacheable': cacheable,
            'changed': False,
        }
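merge_list and merge_dict are used by this plugin but not shown here. A plausible minimal implementation, an assumption rather than the plugin's actual code (dedup as written requires hashable list items):

def merge_list(merge_vals, dedup):
    merged = []
    for vals in merge_vals:
        merged.extend(vals)
    if dedup:
        # preserve order while dropping repeats; set.add returns None
        seen = set()
        merged = [v for v in merged if not (v in seen or seen.add(v))]
    return merged

def merge_dict(merge_vals, dedup, recursive):
    merged = {}
    for vals in merge_vals:
        for key, value in vals.items():
            if recursive and isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = merge_dict([merged[key], value], dedup, recursive)
            elif recursive and isinstance(value, list) and isinstance(merged.get(key), list):
                merged[key] = merge_list([merged[key], value], dedup)
            else:
                merged[key] = value
    return merged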
Example #40
    def path_dwim_relative_stack(self, paths, dirname, source, is_role=False):
        '''
        find one file in the first matching path in the stack, taking roles into account and adding the play basedir as a fallback

        :arg paths: A list of text strings which are the paths to look for the filename in.
        :arg dirname: A text string representing a directory.  The directory
            is prepended to the source to form the path to search for.
        :arg source: A text string which is the filename to search for
        :rtype: A text string
        :returns: An absolute path to the filename ``source``
        '''
        b_dirname = to_bytes(dirname)
        b_source = to_bytes(source)

        result = None
        if source is None:
            display.warning(
                'Invalid request to find a file that matches a "null" value')
        elif source and (source.startswith('~')
                         or source.startswith(os.path.sep)):
            # path is absolute, no relative needed, check existence and return source
            test_path = unfrackpath(b_source)
            if os.path.exists(to_bytes(test_path,
                                       errors='surrogate_or_strict')):
                result = test_path
        else:
            search = []
            display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths))
            for path in paths:
                upath = unfrackpath(path)
                b_upath = to_bytes(upath, errors='surrogate_or_strict')
                b_mydir = os.path.dirname(b_upath)

                # FIXME: this detection fails with non main.yml roles
                # if path is in role and 'tasks' not there already, add it into the search
                if is_role or self._is_role(path):
                    if b_mydir.endswith(b'tasks'):
                        search.append(
                            os.path.join(os.path.dirname(b_mydir), b_dirname,
                                         b_source))
                        search.append(os.path.join(b_mydir, b_source))
                    else:
                        # don't add dirname if user already is using it in source
                        if b_source.split(b'/')[0] != b_dirname:
                            search.append(
                                os.path.join(b_upath, b_dirname, b_source))
                        search.append(os.path.join(b_upath, b_source))

                elif b_dirname not in b_source.split(b'/'):
                    # don't add dirname if user already is using it in source
                    if b_source.split(b'/')[0] != b_dirname:
                        search.append(
                            os.path.join(b_upath, b_dirname, b_source))
                    search.append(os.path.join(b_upath, b_source))

            # always append basedir as last resort
            # don't add dirname if user already is using it in source
            if b_source.split(b'/')[0] != b_dirname:
                search.append(
                    os.path.join(to_bytes(self.get_basedir()), b_dirname,
                                 b_source))
            search.append(os.path.join(to_bytes(self.get_basedir()), b_source))

            display.debug(u'search_path:\n\t%s' %
                          to_text(b'\n\t'.join(search)))
            for b_candidate in search:
                display.vvvvv(u'looking for "%s" at "%s"' %
                              (source, to_text(b_candidate)))
                if os.path.exists(b_candidate):
                    result = to_text(b_candidate)
                    break

        return result
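The candidate-building strategy above (dirname-qualified paths first, bare paths second, basedir last) is clearer without the bytes handling. A text-only sketch under that simplification:

import os

def build_search_paths(paths, dirname, source, basedir):
    search = []
    for path in paths:
        # don't add dirname if the user is already using it in source
        if source.split('/')[0] != dirname:
            search.append(os.path.join(path, dirname, source))
        search.append(os.path.join(path, source))
    # always append basedir as a last resort
    if source.split('/')[0] != dirname:
        search.append(os.path.join(basedir, dirname, source))
    search.append(os.path.join(basedir, source))
    return search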
Example #41
    def format_plugin_doc(self, plugin, loader, plugin_type, search_paths):
        text = ''

        try:
            # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
            filename = loader.find_plugin(plugin,
                                          mod_type='.py',
                                          ignore_deprecated=True,
                                          check_aliases=True)
            if filename is None:
                display.warning("%s %s not found in:\n%s\n" %
                                (plugin_type, plugin, search_paths))
                return

            if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
                return

            try:
                doc, plainexamples, returndocs, metadata = get_docstring(
                    filename,
                    fragment_loader,
                    verbose=(self.options.verbosity > 0))
            except Exception:
                display.vvv(traceback.format_exc())
                display.error(
                    "%s %s has a documentation error formatting or is missing documentation."
                    % (plugin_type, plugin),
                    wrap_text=False)
                return

            if doc is not None:

                # assign from other sections
                doc['plainexamples'] = plainexamples
                doc['returndocs'] = returndocs
                doc['metadata'] = metadata

                # generate extra data
                if plugin_type == 'module':
                    # is there corresponding action plugin?
                    if plugin in action_loader:
                        doc['action'] = True
                    else:
                        doc['action'] = False
                doc['filename'] = filename
                doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
                if 'docuri' in doc:
                    doc['docuri'] = doc[plugin_type].replace('_', '-')

                if self.options.show_snippet and plugin_type == 'module':
                    text += self.get_snippet_text(doc)
                else:
                    text += self.get_man_text(doc)

                return text
            else:
                if 'removed' in metadata.get('status', []):
                    display.warning("%s %s has been removed\n" %
                                    (plugin_type, plugin))
                    return

                # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
                # probably a quoting issue.
                raise AnsibleError("Parsing produced an empty object.")
        except Exception as e:
            display.vvv(traceback.format_exc())
            raise AnsibleError(
                "%s %s missing documentation (or could not parse documentation): %s\n"
                % (plugin_type, plugin, str(e)))
Example #42
    def set_host_overrides(self, host, hostvars=None):
        '''
        Override WinRM-specific options from host variables.
        '''
        self._winrm_host = self._play_context.remote_addr
        self._winrm_port = int(self._play_context.port or 5986)
        self._winrm_scheme = hostvars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
        self._winrm_path = hostvars.get('ansible_winrm_path', '/wsman')
        self._winrm_user = self._play_context.remote_user
        self._winrm_pass = self._play_context.password
        self._become_method = self._play_context.become_method
        self._become_user = self._play_context.become_user
        self._become_pass = self._play_context.become_pass

        self._kinit_cmd = hostvars.get('ansible_winrm_kinit_cmd', 'kinit')

        if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
            self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
        else:
            # for legacy versions of pywinrm, use the values we know are supported
            self._winrm_supported_authtypes = set(['plaintext','ssl','kerberos'])

        # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
        transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'

        if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
            self._winrm_transport = 'kerberos,%s' % transport_selector
        else:
            self._winrm_transport = transport_selector
        self._winrm_transport = hostvars.get('ansible_winrm_transport', self._winrm_transport)
        if isinstance(self._winrm_transport, string_types):
            self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]

        unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)

        if unsupported_transports:
            raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % list(unsupported_transports))

        # if kerberos is among our transports and there's a password specified, we're managing the tickets
        kinit_mode = str(hostvars.get('ansible_winrm_kinit_mode', '')).strip()
        if kinit_mode == "":
            # HACK: ideally, remove multi-transport stuff
            self._kerb_managed = "kerberos" in self._winrm_transport and self._winrm_pass
        elif kinit_mode == "managed":
            self._kerb_managed = True
        elif kinit_mode == "manual":
            self._kerb_managed = False
        else:
            raise AnsibleError('Unknown ansible_winrm_kinit_mode value: "%s" (must be "managed" or "manual")' % kinit_mode)

        # arg names we're going to pass directly (masked from the generic passthrough below)
        internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'])

        self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
        argspec = inspect.getargspec(Protocol.__init__)
        supported_winrm_args = set(argspec.args)
        supported_winrm_args.update(internal_kwarg_mask)
        passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in hostvars if v.startswith('ansible_winrm_')])
        unsupported_args = passed_winrm_args.difference(supported_winrm_args)

        # warn for kwargs unsupported by the installed version of pywinrm
        for arg in unsupported_args:
            display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))

        # pass through matching kwargs, excluding the list we want to treat specially
        for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
            self._winrm_kwargs[arg] = hostvars['ansible_winrm_%s' % arg]
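
A minimal standalone sketch of the kwargs-filtering pattern used above: hostvars prefixed with 'ansible_winrm_' are checked against the argument names the target callable actually accepts, so unknown options produce a warning instead of a TypeError. The Protocol class and hostvars below are hypothetical stand-ins, not the real pywinrm API.

import inspect


class Protocol(object):  # stand-in for winrm.protocol.Protocol
    def __init__(self, endpoint, transport='plaintext', username=None,
                 password=None, read_timeout_sec=30):
        pass


hostvars = {
    'ansible_winrm_read_timeout_sec': 60,  # supported -> passed through
    'ansible_winrm_frobnicate': True,      # unsupported -> warned about
}

# getargspec mirrors the original code; modern Python would use inspect.signature
supported = set(inspect.getargspec(Protocol.__init__).args)
passed = set(v.replace('ansible_winrm_', '') for v in hostvars
             if v.startswith('ansible_winrm_'))

for arg in passed - supported:
    print("warning: ansible_winrm_%s unsupported" % arg)

kwargs = dict((arg, hostvars['ansible_winrm_%s' % arg])
              for arg in passed & supported)
print(kwargs)  # {'read_timeout_sec': 60}
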
Exemple #43
0
def _extract_param(template, root, attrs, value):

    key = None
    when = attrs.get('when')
    conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when
    param_to_xpath_map = attrs['items']

    if isinstance(value, Mapping):
        key = value.get('key', None)
        if key:
            value = value['values']

    entries = dict() if key else list()

    for element in root.findall(attrs['top']):
        entry = dict()
        item_dict = dict()
        for param, param_xpath in iteritems(param_to_xpath_map):
            fields = None
            try:
                fields = element.findall(param_xpath)
            except Exception:
                display.warning(
                    "Failed to evaluate value of '%s' with XPath '%s'.\nUnexpected error: %s."
                    % (param, param_xpath, traceback.format_exc()))

            tags = param_xpath.split('/')

            # check if the xpath ends with an attribute selector.
            # If it does, set the attribute key/value dict as the param value when the attribute matches;
            # otherwise, for a normal xpath, assign the matched element's text value.
            if len(tags) and tags[-1].endswith(']'):
                if fields:
                    if len(fields) > 1:
                        item_dict[param] = [field.attrib for field in fields]
                    else:
                        item_dict[param] = fields[0].attrib
                else:
                    item_dict[param] = {}
            else:
                if fields:
                    if len(fields) > 1:
                        item_dict[param] = [field.text for field in fields]
                    else:
                        item_dict[param] = fields[0].text
                else:
                    item_dict[param] = None

        if isinstance(value, Mapping):
            for item_key, item_value in iteritems(value):
                entry[item_key] = template(item_value, {'item': item_dict})
        else:
            entry = template(value, {'item': item_dict})

        if key:
            expanded_key = template(key, {'item': item_dict})
            if when:
                if template(conditional,
                            {'item': {
                                'key': expanded_key,
                                'value': entry
                            }}):
                    entries[expanded_key] = entry
            else:
                entries[expanded_key] = entry
        else:
            if when:
                if template(conditional, {'item': entry}):
                    entries.append(entry)
            else:
                entries.append(entry)

    return entries
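
A rough, self-contained sketch of the extraction loop above against a tiny XML document. The element names and param-to-xpath map are illustrative only, and the multi-match and attribute branches are omitted for brevity.

import xml.etree.ElementTree as ET

root = ET.fromstring(
    '<rpc-reply>'
    '<interface><name>ge-0/0/0</name><mtu>1514</mtu></interface>'
    '<interface><name>ge-0/0/1</name><mtu>9192</mtu></interface>'
    '</rpc-reply>')

param_to_xpath_map = {'name': 'name', 'mtu': 'mtu'}  # param -> relative xpath

entries = []
for element in root.findall('interface'):            # attrs['top']
    item_dict = {}
    for param, param_xpath in param_to_xpath_map.items():
        fields = element.findall(param_xpath)
        # single match -> text value, no match -> None
        item_dict[param] = fields[0].text if fields else None
    entries.append(item_dict)

print(entries)
# [{'name': 'ge-0/0/0', 'mtu': '1514'}, {'name': 'ge-0/0/1', 'mtu': '9192'}]
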
Exemple #44
0
def _find_snippet_imports(module_name, module_data, module_path, module_args,
                          task_vars, module_compression):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.
    """

    module_substyle = module_style = 'old'

    # module_style is something important to calling code (ActionBase).  It
    # determines how arguments are formatted (json vs k=v) and whether
    # a separate arguments file needs to be sent over the wire.
    # module_substyle is extra information that's useful internally.  It tells
    # us what we have to look to substitute in the module files and whether
    # we're using module replacer or ansiballz to format the module itself.
    if _is_binary(module_data):
        module_substyle = module_style = 'binary'
    elif REPLACER in module_data:
        # Do REPLACER before from ansible.module_utils because we need to make
        # sure we substitute the ansible.module_utils.basic import for REPLACER
        module_style = 'new'
        module_substyle = 'python'
        module_data = module_data.replace(
            REPLACER, b'from ansible.module_utils.basic import *')
    elif b'from ansible.module_utils.' in module_data:
        module_style = 'new'
        module_substyle = 'python'
    elif REPLACER_WINDOWS in module_data:
        module_style = 'new'
        module_substyle = 'powershell'
    elif REPLACER_JSONARGS in module_data:
        module_style = 'new'
        module_substyle = 'jsonargs'
    elif b'WANT_JSON' in module_data:
        module_substyle = module_style = 'non_native_want_json'

    shebang = None
    # Neither old-style, non_native_want_json nor binary modules should be modified
    # except for the shebang line (done by modify_module)
    if module_style in ('old', 'non_native_want_json', 'binary'):
        return module_data, module_style, shebang

    output = BytesIO()
    py_module_names = set()

    if module_substyle == 'python':
        params = dict(ANSIBLE_MODULE_ARGS=module_args, )
        python_repred_params = repr(json.dumps(params))

        try:
            compression_method = getattr(zipfile, module_compression)
        except AttributeError:
            display.warning(
                u'Bad module compression string specified: %s.  Using ZIP_STORED (no compression)'
                % module_compression)
            compression_method = zipfile.ZIP_STORED

        lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
        cached_module_filename = os.path.join(
            lookup_path, "%s-%s" % (module_name, module_compression))

        zipdata = None
        # Optimization -- don't lock if the module has already been cached
        if os.path.exists(cached_module_filename):
            display.debug('ANSIBALLZ: using cached module: %s' %
                          cached_module_filename)
            with open(cached_module_filename, 'rb') as f:
                zipdata = f.read()
        else:
            if module_name in action_write_locks.action_write_locks:
                display.debug('ANSIBALLZ: Using lock for %s' % module_name)
                lock = action_write_locks.action_write_locks[module_name]
            else:
                # If the action plugin directly invokes the module (instead of
                # going through a strategy) then we don't have a cross-process
                # Lock specifically for this module.  Use the "unexpected
                # module" lock instead
                display.debug('ANSIBALLZ: Using generic lock for %s' %
                              module_name)
                lock = action_write_locks.action_write_locks[None]

            display.debug('ANSIBALLZ: Acquiring lock')
            with lock:
                display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
                # Check that no other process has created this while we were
                # waiting for the lock
                if not os.path.exists(cached_module_filename):
                    display.debug('ANSIBALLZ: Creating module')
                    # Create the module zip data
                    zipoutput = BytesIO()
                    zf = zipfile.ZipFile(zipoutput,
                                         mode='w',
                                         compression=compression_method)
                    # Note: If we need to import from release.py first,
                    # remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
                    zf.writestr(
                        'ansible/__init__.py',
                        b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="'
                        + to_bytes(__version__) + b'"\n__author__="' +
                        to_bytes(__author__) + b'"\n')
                    zf.writestr(
                        'ansible/module_utils/__init__.py',
                        b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n'
                    )

                    zf.writestr('ansible_module_%s.py' % module_name,
                                module_data)

                    py_module_cache = {('__init__', ): b''}
                    recursive_finder(module_name, module_data, py_module_names,
                                     py_module_cache, zf)
                    zf.close()
                    zipdata = base64.b64encode(zipoutput.getvalue())

                    # Write the assembled module to a temp file (write to temp
                    # so that no one looking for the file reads a partially
                    # written file)
                    if not os.path.exists(lookup_path):
                        # Note -- if we have a global function to setup, that would
                        # be a better place to run this
                        os.makedirs(lookup_path)
                    display.debug('ANSIBALLZ: Writing module')
                    with open(cached_module_filename + '-part', 'wb') as f:
                        f.write(zipdata)

                    # Rename the file into its final position in the cache so
                    # future users of this module can read it off the
                    # filesystem instead of constructing from scratch.
                    display.debug('ANSIBALLZ: Renaming module')
                    os.rename(cached_module_filename + '-part',
                              cached_module_filename)
                    display.debug('ANSIBALLZ: Done creating module')

            if zipdata is None:
                display.debug('ANSIBALLZ: Reading module after lock')
                # Another process wrote the file while we were waiting for
                # the write lock.  Go ahead and read the data from disk
                # instead of re-creating it.
                try:
                    with open(cached_module_filename, 'rb') as f:
                        zipdata = f.read()
                except IOError:
                    raise AnsibleError(
                        'A different worker process failed to create module file.'
                        ' Look at traceback for that process for debugging information.'
                    )
        zipdata = to_text(zipdata, errors='surrogate_or_strict')

        shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
        if shebang is None:
            shebang = u'#!/usr/bin/python'

        # Enclose the parts of the interpreter in quotes because we're
        # substituting it into the template as a Python string
        interpreter_parts = interpreter.split(u' ')
        interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))

        now = datetime.datetime.utcnow()
        output.write(
            to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
                zipdata=zipdata,
                ansible_module=module_name,
                params=python_repred_params,
                shebang=shebang,
                interpreter=interpreter,
                coding=ENCODING_STRING,
                year=now.year,
                month=now.month,
                day=now.day,
                hour=now.hour,
                minute=now.minute,
                second=now.second,
            )))
        module_data = output.getvalue()

    elif module_substyle == 'powershell':
        # Module replacer for jsonargs and windows
        lines = module_data.split(b'\n')
        for line in lines:
            if REPLACER_WINDOWS in line:
                ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
                output.write(ps_data)
                py_module_names.add((b'powershell', ))
                continue
            output.write(line + b'\n')
        module_data = output.getvalue()

        module_args_json = to_bytes(json.dumps(module_args))
        module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)

        # Powershell/winrm don't actually make use of shebang so we can
        # safely set this here.  If we let the fallback code handle this
        # it can fail in the presence of the UTF8 BOM commonly added by
        # Windows text editors
        shebang = u'#!powershell'

        # Sanity check from 1.x days.  This is currently useless as we only
        # get here if we are going to substitute powershell.ps1 into the
        # module anyway.  Leaving it for when/if we add other powershell
        # module_utils files.
        if (b'powershell', ) not in py_module_names:
            raise AnsibleError(
                "missing required import in %s: # POWERSHELL_COMMON" %
                module_path)

    elif module_substyle == 'jsonargs':
        module_args_json = to_bytes(json.dumps(module_args))

        # these strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ansiballz).  If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
        python_repred_args = to_bytes(repr(module_args_json))
        module_data = module_data.replace(REPLACER_VERSION,
                                          to_bytes(repr(__version__)))
        module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
        module_data = module_data.replace(
            REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))

        # The main event -- substitute the JSON args string into the module
        module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)

        facility = b'syslog.' + to_bytes(task_vars.get(
            'ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY),
                                         errors='surrogate_or_strict')
        module_data = module_data.replace(b'syslog.LOG_USER', facility)

    return (module_data, module_style, shebang)
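
One detail worth isolating from the AnsiballZ branch above is the write-to-temp-then-rename step, which is what keeps concurrent readers from ever seeing a half-written cache entry. A minimal sketch of just that pattern (paths and names are illustrative):

import os

def write_cache_atomically(cached_module_filename, zipdata):
    lookup_path = os.path.dirname(cached_module_filename)
    if not os.path.exists(lookup_path):
        os.makedirs(lookup_path)
    # write to a temp name first so nobody reads a partial file...
    with open(cached_module_filename + '-part', 'wb') as f:
        f.write(zipdata)
    # ...then rename into place; on POSIX this is atomic within one filesystem
    os.rename(cached_module_filename + '-part', cached_module_filename)
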
Exemple #45
0
    def run(self):

        super(DocCLI, self).run()

        plugin_type = self.options.type

        # choose plugin type
        if plugin_type == 'cache':
            loader = cache_loader
        elif plugin_type == 'callback':
            loader = callback_loader
        elif plugin_type == 'connection':
            loader = connection_loader
        elif plugin_type == 'lookup':
            loader = lookup_loader
        elif plugin_type == 'strategy':
            loader = strategy_loader
        elif plugin_type == 'vars':
            loader = vars_loader
        elif plugin_type == 'inventory':
            loader = inventory_loader
        elif plugin_type == 'shell':
            loader = shell_loader
        else:
            loader = module_loader

        # add to plugin path from command line
        if self.options.module_path:
            for path in self.options.module_path:
                if path:
                    loader.add_directory(path)

        # save only top level paths for errors
        search_paths = DocCLI.print_paths(loader)
        loader._paths = None  # reset so we can use subdirs below

        # list plugins names and filepath for type
        if self.options.list_files:
            paths = loader._get_paths()
            for path in paths:
                self.find_plugins(path, plugin_type)

            list_text = self.get_plugin_list_filenames(loader)
            self.pager(list_text)
            return 0

        # list plugins for type
        if self.options.list_dir:
            paths = loader._get_paths()
            for path in paths:
                self.find_plugins(path, plugin_type)

            self.pager(self.get_plugin_list_text(loader))
            return 0

        # process all plugins of type
        if self.options.all_plugins:
            paths = loader._get_paths()
            for path in paths:
                self.find_plugins(path, plugin_type)
            self.args = sorted(set(self.plugin_list))

        if len(self.args) == 0:
            raise AnsibleOptionsError("Incorrect options passed")

        # process command line list
        text = ''
        for plugin in self.args:
            try:
                # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
                filename = loader.find_plugin(plugin,
                                              mod_type='.py',
                                              ignore_deprecated=True,
                                              check_aliases=True)
                if filename is None:
                    display.warning("%s %s not found in:\n%s\n" %
                                    (plugin_type, plugin, search_paths))
                    continue

                if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
                    continue

                try:
                    doc, plainexamples, returndocs, metadata = get_docstring(
                        filename,
                        fragment_loader,
                        verbose=(self.options.verbosity > 0))
                except Exception:
                    display.vvv(traceback.format_exc())
                    display.error(
                        "%s %s has a documentation error formatting or is missing documentation."
                        % (plugin_type, plugin),
                        wrap_text=False)
                    continue

                if doc is not None:

                    # assign from other sections
                    doc['plainexamples'] = plainexamples
                    doc['returndocs'] = returndocs
                    doc['metadata'] = metadata

                    # generate extra data
                    if plugin_type == 'module':
                        # is there corresponding action plugin?
                        if plugin in action_loader:
                            doc['action'] = True
                        else:
                            doc['action'] = False
                    doc['filename'] = filename
                    doc['now_date'] = datetime.date.today().strftime(
                        '%Y-%m-%d')
                    if 'docuri' in doc:
                        doc['docuri'] = doc[plugin_type].replace('_', '-')

                    if self.options.show_snippet and plugin_type == 'module':
                        text += self.get_snippet_text(doc)
                    else:
                        text += self.get_man_text(doc)
                else:
                    # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
                    # probably a quoting issue.
                    raise AnsibleError("Parsing produced an empty object.")
            except Exception as e:
                display.vvv(traceback.format_exc())
                raise AnsibleError(
                    "%s %s missing documentation (or could not parse documentation): %s\n"
                    % (plugin_type, plugin, str(e)))

        if text:
            self.pager(text)
        return 0
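
The plugin-type dispatch at the top of run() is a straight if/elif chain; the same selection could be table-driven. A hypothetical refactor sketch, with dummy objects standing in for the real loader singletons:

# stand-ins for the loader singletons the real CLI imports
cache_loader, callback_loader, module_loader = object(), object(), object()

LOADERS = {
    'cache': cache_loader,
    'callback': callback_loader,
    # ... one entry per plugin type, as in the if/elif chain above
}

def pick_loader(plugin_type):
    # modules are the fallback, mirroring the final 'else' branch
    return LOADERS.get(plugin_type, module_loader)

assert pick_loader('cache') is cache_loader
assert pick_loader('module') is module_loader
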
Exemple #46
0
    def parse_source(self, source, cache=True):
        ''' Generate or update inventory for the source provided '''

        parsed = False
        display.debug(u'Examining possible inventory source: %s' % source)

        b_source = to_bytes(source)
        # process directories as a collection of inventories
        if os.path.isdir(b_source):
            display.debug(u'Searching for inventory files in directory: %s' %
                          source)
            for i in sorted(os.listdir(b_source)):

                display.debug(u'Considering %s' % i)
                # Skip hidden files and stuff we explicitly ignore
                if IGNORED.search(i):
                    continue

                # recursively deal with directory entries
                fullpath = os.path.join(b_source, i)
                parsed_this_one = self.parse_source(to_text(fullpath))
                display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
                if not parsed:
                    parsed = parsed_this_one
        else:
            # left with strings or files, let plugins figure it out

            # set so new hosts can use for inventory_file/dir vars
            self._inventory.current_source = source

            # get inventory plugins if needed, there should always be at least one generator
            if not self._inventory_plugins:
                self._setup_inventory_plugins()

            # try source with each plugin
            failures = []
            for plugin in self._inventory_plugins:
                plugin_name = to_text(
                    getattr(plugin, '_load_name',
                            getattr(plugin, '_original_path', '')))
                display.debug(u'Attempting to use plugin %s' % plugin_name)

                # initialize
                if plugin.verify_file(source):
                    try:
                        plugin.parse(self._inventory,
                                     self._loader,
                                     source,
                                     cache=cache)
                        parsed = True
                        display.vvv(
                            u'Parsed %s inventory source with %s plugin' %
                            (to_text(source), plugin_name))
                        break
                    except AnsibleParserError as e:
                        failures.append(
                            u'\n* Failed to parse %s with %s inventory plugin: %s\n'
                            % (to_text(source), plugin_name, to_text(e)))
                else:
                    display.debug(u'%s did not meet %s requirements' %
                                  (to_text(source), plugin_name))
            else:
                if failures:
                    # only if no plugin processed files should we show errors.
                    for fail in failures:
                        display.warning(fail)

        if not parsed:
            display.warning(u"Unable to parse %s as an inventory source" %
                            to_text(source))

        # clean up, just in case
        self._inventory.current_source = None

        return parsed
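
Note that the failure reporting in parse_source hangs off Python's for/else: the else block after the plugin loop runs only when no plugin broke out of the loop, i.e. when nothing parsed the source. A tiny standalone illustration of the construct:

def try_parsers(parsers, source):
    failures = []
    for name, parse in parsers:
        try:
            parse(source)
            print('parsed %s with %s' % (source, name))
            break                 # success: the for/else 'else' is skipped
        except ValueError as e:
            failures.append('%s: %s' % (name, e))
    else:
        # runs only when the loop finished without a break,
        # i.e. when no parser succeeded
        for fail in failures:
            print('warning: %s' % fail)

def yaml_like(source):
    raise ValueError('not yaml')

def ini_like(source):
    raise ValueError('not ini')

try_parsers([('yaml', yaml_like), ('ini', ini_like)], 'hosts.txt')
# warning: yaml: not yaml
# warning: ini: not ini
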
Exemple #47
0
    def _fixup_perms(self,
                     remote_path,
                     remote_user,
                     execute=True,
                     recursive=True):
        """
        We need the files we upload to be readable (and sometimes executable)
        by the user being sudo'd to, but we want to limit other people's access
        (because the files could contain passwords or other private
        information).  We achieve this in one of these ways:

        * If no sudo is performed or the remote_user is sudo'ing to
          themselves, we don't have to change permissions.
        * If the remote_user sudo's to a privileged user (for instance, root),
          we don't have to change permissions.
        * If the remote_user is a privileged user and sudo's to an
          unprivileged user then we change the owner of the file to the
          unprivileged user so they can read it.
        * If the remote_user is an unprivileged user and we're sudo'ing to
          a second unprivileged user then we attempt to grant the second
          unprivileged user access via file system acls.
        * If granting file system acls fails we can set the file to be world
          readable so that the second unprivileged user can read the file.
          Since this could allow other users to get access to private
          information we only do this if Ansible is configured with
          "allow_world_readable_tmpfiles" in the ansible.cfg
        """
        if self._connection._shell.SHELL_FAMILY == 'powershell':
            # This won't work on Powershell as-is, so we'll just completely skip until
            # we have a need for it, at which point we'll have to do something different.
            return remote_path

        if remote_path is None:
            # Sometimes code calls us naively -- it has a var which could
            # contain a path to a tmp dir but doesn't know if it needs to
            # exist or not.  If there's no path, then there's no need for us
            # to do work
            display.debug(
                '_fixup_perms called with remote_path==None.  Sure this is correct?'
            )
            return remote_path

        if self._play_context.become and self._play_context.become_user not in (
                'root', remote_user):
            # Unprivileged user that's different than the ssh user.  Let's get
            # to work!

            # Try chown'ing the file. This will only work if our SSH user has
            # root privileges, but since we can't reliably determine that from
            # the username (think "toor" on FreeBSD), let's just try first and
            # apologize later:
            res = self._remote_chown(remote_path,
                                     self._play_context.become_user,
                                     recursive=recursive)
            if res['rc'] == 0:
                # root can read things that don't have read bit but can't
                # execute them without the execute bit, so we might need to
                # set that even if we're root. We just ran chown successfully,
                # so apparently we are root.
                if execute:
                    res = self._remote_chmod('u+x',
                                             remote_path,
                                             recursive=recursive)
                    if res['rc'] != 0:
                        raise AnsibleError(
                            'Failed to set file mode on remote temporary files (rc: {0}, err: {1})'
                            .format(res['rc'], res['stderr']))

            elif remote_user == 'root':
                raise AnsibleError(
                    'Failed to change ownership of the temporary files Ansible needs to create despite connecting as root.  Unprivileged become user would be unable to read the file.'
                )
            else:
                # Chown'ing failed. We're probably lacking root privileges; let's try something else.
                if execute:
                    mode = 'rx'
                else:
                    mode = 'rX'
                # Try to use fs acls to solve this problem
                res = self._remote_set_user_facl(
                    remote_path,
                    self._play_context.become_user,
                    mode,
                    recursive=recursive,
                    sudoable=False)
                if res['rc'] != 0:
                    if C.ALLOW_WORLD_READABLE_TMPFILES:
                        # fs acls failed -- do things this insecure way only
                        # if the user opted in in the config file
                        display.warning(
                            'Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user which may be insecure. For information on securing this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'
                        )
                        res = self._remote_chmod('a+%s' % mode,
                                                 remote_path,
                                                 recursive=recursive)
                        if res['rc'] != 0:
                            raise AnsibleError(
                                'Failed to set file mode on remote files (rc: {0}, err: {1})'
                                .format(res['rc'], res['stderr']))
                    else:
                        raise AnsibleError(
                            'Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user. For information on working around this, see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'
                        )
        elif execute:
            # Can't depend on the file being transferred with execute
            # permissions.  Only need user perms because no become was
            # used here
            res = self._remote_chmod('u+x', remote_path, recursive=recursive)
            if res['rc'] != 0:
                raise AnsibleError(
                    'Failed to set file mode on remote files (rc: {0}, err: {1})'
                    .format(res['rc'], res['stderr']))

        return remote_path
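
Stripped of error handling, _fixup_perms is a three-step fallback chain. A schematic sketch with hypothetical chown/setfacl/chmod callables (each returning a dict with an 'rc' key, like the real _remote_* helpers):

def fixup_perms(path, become_user, chown, setfacl, chmod,
                allow_world_readable=False, mode='rx'):
    # 1. try handing the file to the become user outright (works if we're root)
    if chown(path, become_user)['rc'] == 0:
        return path
    # 2. fall back to a filesystem ACL granting only that user access
    if setfacl(path, become_user, mode)['rc'] == 0:
        return path
    # 3. last resort: world-readable, but only if the operator opted in
    if allow_world_readable and chmod('a+%s' % mode, path)['rc'] == 0:
        return path
    raise RuntimeError('could not make %s readable by %s' % (path, become_user))
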
Exemple #48
0
    def get_vars(self,
                 loader,
                 play=None,
                 host=None,
                 task=None,
                 include_hostvars=True,
                 include_delegate_to=True,
                 use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''

        display.debug("in VariableManager get_vars()")
        cache_entry = self._get_cache_entry(play=play, host=host, task=task)
        if cache_entry in VARIABLE_CACHE and use_cache:
            display.debug("vars are cached, returning them now")
            return VARIABLE_CACHE[cache_entry]

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            loader=loader,
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        # if we have a task in this context, and that task has a role, make
        # sure it sees its defaults above any other roles, as we previously
        # (v1) made sure each task had a copy of its roles default vars
        if task and task._role is not None and (play or task.action
                                                == 'include_role'):
            all_vars = combine_vars(
                all_vars,
                task._role.get_default_vars(dep_chain=task.get_dep_chain()))

        if host:
            # next, if a host is specified, we load any vars from group_vars
            # files and then any vars from host_vars files which may apply to
            # this host or the groups it belongs to

            # we merge in the special 'all' group_vars first, if they exist
            if 'all' in self._group_vars_files:
                data = preprocess_vars(self._group_vars_files['all'])
                for item in data:
                    all_vars = combine_vars(all_vars, item)

            # we merge in vars from groups specified in the inventory (INI or script)
            all_vars = combine_vars(all_vars, host.get_group_vars())

            for group in sorted(host.get_groups(),
                                key=lambda g: (g.depth, g.name)):
                if group.name in self._group_vars_files and group.name != 'all':
                    for data in self._group_vars_files[group.name]:
                        data = preprocess_vars(data)
                        for item in data:
                            all_vars = combine_vars(all_vars, item)

            # then we merge in vars from the host specified in the inventory (INI or script)
            all_vars = combine_vars(all_vars, host.get_vars())

            # then we merge in the host_vars/<hostname> file, if it exists
            host_name = host.get_name()
            if host_name in self._host_vars_files:
                for data in self._host_vars_files[host_name]:
                    data = preprocess_vars(data)
                    for item in data:
                        all_vars = combine_vars(all_vars, item)

            # finally, the facts cache for this host, if it exists
            try:
                host_facts = wrap_var(self._fact_cache.get(host.name, dict()))
                all_vars = combine_vars(all_vars, host_facts)
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            for vars_file_item in play.get_vars_files():
                # create a set of temporary vars here, which incorporate the extra
                # and magic vars so we can properly template the vars_files entries
                temp_vars = combine_vars(all_vars, self._extra_vars)
                temp_vars = combine_vars(temp_vars, magic_variables)
                templar = Templar(loader=loader, variables=temp_vars)

                # we assume each item in the list is itself a list, as we
                # support "conditional includes" for vars_files, which mimics
                # the with_first_found mechanism.
                vars_file_list = vars_file_item
                if not isinstance(vars_file_list, list):
                    vars_file_list = [vars_file_list]

                # now we iterate through the (potential) files, and break out
                # as soon as we read one from the list. If none are found, we
                # raise an error, which is silently ignored at this point.
                try:
                    for vars_file in vars_file_list:
                        vars_file = templar.template(vars_file)
                        try:
                            data = preprocess_vars(
                                loader.load_from_file(vars_file))
                            if data is not None:
                                for item in data:
                                    all_vars = combine_vars(all_vars, item)
                            break
                        except AnsibleFileNotFound as e:
                            # we continue on loader failures
                            continue
                        except AnsibleParserError as e:
                            raise
                    else:
                        # if include_delegate_to is set to False, we ignore the missing
                        # vars file here because we're working on a delegated host
                        if include_delegate_to:
                            raise AnsibleFileNotFound(
                                "vars file %s was not found" % vars_file_item)
                except (UndefinedError, AnsibleUndefinedVariable):
                    if host is not None and self._fact_cache.get(
                            host.name,
                            dict()).get('module_setup') and task is not None:
                        raise AnsibleUndefinedVariable(
                            "an undefined variable was found when attempting to template the vars_files item '%s'"
                            % vars_file_item,
                            obj=vars_file_item)
                    else:
                        # we do not have a full context here, and the missing variable could be
                        # because of that, so just show a warning and continue
                        display.vvv(
                            "skipping vars_file '%s' due to an undefined variable"
                            % vars_file_item)
                        continue

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(
                        all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars,
                    task._role.get_vars(task.get_dep_chain(),
                                        include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            all_vars = combine_vars(
                all_vars, self._vars_cache.get(host.get_name(), dict()))
            all_vars = combine_vars(
                all_vars,
                self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # finally, we merge in extra vars and the magic variables
        all_vars = combine_vars(all_vars, self._extra_vars)
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            if 'environment' not in all_vars:
                all_vars['environment'] = task.environment
            else:
                display.warning(
                    "The variable 'environment' appears to be used already, which is also used internally for environment variables set on the task/block/play. You should use a different variable name to avoid conflicts with this internal variable"
                )

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'] = self._get_delegated_vars(
                loader, play, task, all_vars)

        #VARIABLE_CACHE[cache_entry] = all_vars
        if task or play:
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars
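
Each combine_vars call above layers one mapping over the accumulated result, so with the default hash_behaviour=replace the layer merged last wins for any given key; the call order encodes the precedence order from the docstring. A toy illustration:

def combine_vars(a, b):
    # default 'replace' hash_behaviour: keys in b shadow keys in a
    result = a.copy()
    result.update(b)
    return result

all_vars = {}
for layer in (
        {'x': 'role-default', 'y': 1},  # role defaults (lowest precedence)
        {'x': 'group-vars'},            # group_vars
        {'x': 'host-vars'},             # host_vars
        {'x': 'extra-vars'},            # -e extra vars (highest precedence)
):
    all_vars = combine_vars(all_vars, layer)

print(all_vars['x'])  # extra-vars -- the layer merged last wins
print(all_vars['y'])  # 1 -- untouched keys survive from earlier layers
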
Exemple #49
0
    def run(self, iterator, play_context):
        '''
        The "free" strategy is a bit more complex, in that it allows tasks to
        be sent to hosts as quickly as they can be processed. This means that
        some hosts may finish very quickly if their tasks result in little or
        no work compared to other systems.

        The algorithm used here also tries to be more "fair" when iterating
        through hosts by remembering the last host in the list to be given a task
        and starting the search from there as opposed to the top of the hosts
        list again, which would end up favoring hosts near the beginning of the
        list.
        '''

        # the last host to be given a task
        last_host = 0

        result = True

        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            hosts_left = [
                host
                for host in self._inventory.get_hosts(iterator._play.hosts)
                if host.name not in self._tqm._unreachable_hosts
            ]
            if len(hosts_left) == 0:
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break

            work_to_do = False  # assume we have no more work to do
            starting_host = last_host  # save current position so we know when we've
            # looped back around and need to break

            # try and find an unblocked host with a task to run
            host_results = []
            while True:
                host = hosts_left[last_host]
                display.debug("next free host: %s" % host)
                host_name = host.get_name()

                # peek at the next task for the host, to see if there's
                # anything to do for this host
                (state, task) = iterator.get_next_task_for_host(host,
                                                                peek=True)
                display.debug("free host state: %s" % state)
                display.debug("free host task: %s" % task)
                if host_name not in self._tqm._unreachable_hosts and task:

                    # set the flag so the outer loop knows we've still found
                    # some work which needs to be done
                    work_to_do = True

                    display.debug("this host has work to do")

                    # check to see if this host is blocked (still executing a previous task)
                    if host_name not in self._blocked_hosts or not self._blocked_hosts[
                            host_name]:
                        # pop the task, mark the host blocked, and queue it
                        self._blocked_hosts[host_name] = True
                        (state, task) = iterator.get_next_task_for_host(host)

                        try:
                            action = action_loader.get(task.action,
                                                       class_only=True)
                        except KeyError:
                            # we don't care here, because the action may simply not have a
                            # corresponding action plugin
                            action = None

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        display.debug("done getting variables")

                        run_once = templar.template(
                            task.run_once) or action and getattr(
                                action, 'BYPASS_HOST_LOOP', False)
                        if run_once:
                            if action and getattr(action, 'BYPASS_HOST_LOOP',
                                                  False):
                                raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy " \
                                                   "and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
                            else:
                                display.warning("Using run_once with the free strategy is not currently supported. This task will still be " \
                                                "executed for every host in the inventory list.")

                        # check to see if this task should be skipped, due to it being a member of a
                        # role which has already run (and whether that role allows duplicate execution)
                        if task._role and task._role.has_run(host):
                            # If there is no metadata, the default behavior is to not allow duplicates,
                            # if there is metadata, check to see if the allow_duplicates flag was set to true
                            if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                                display.debug(
                                    "'%s' skipped because role has already run"
                                    % task)
                                del self._blocked_hosts[host_name]
                                continue

                        if task.action == 'meta':
                            self._execute_meta(task, play_context, iterator)
                            self._blocked_hosts[host_name] = False
                        else:
                            # handle step if needed, skip meta actions as they are used internally
                            if not self._step or self._take_step(
                                    task, host_name):
                                if task.any_errors_fatal:
                                    display.warning(
                                        "Using any_errors_fatal with the free strategy is not supported, as tasks are executed independently on each host"
                                    )
                                self._tqm.send_callback(
                                    'v2_playbook_on_task_start',
                                    task,
                                    is_conditional=False)
                                self._queue_task(host, task, task_vars,
                                                 play_context)
                    else:
                        display.debug("%s is blocked, skipping for now" %
                                      host_name)

                # move on to the next host and make sure we
                # haven't gone past the end of our hosts list
                last_host += 1
                if last_host > len(hosts_left) - 1:
                    last_host = 0

                # if we've looped around back to the start, break out
                if last_host == starting_host:
                    break

            results = self._process_pending_results(iterator)
            host_results.extend(results)

            try:
                included_files = IncludedFile.process_include_results(
                    host_results,
                    self._tqm,
                    iterator=iterator,
                    inventory=self._inventory,
                    loader=self._loader,
                    variable_manager=self._variable_manager)
            except AnsibleError as e:
                return False

            if len(included_files) > 0:
                all_blocks = dict((host, []) for host in hosts_left)
                for included_file in included_files:
                    display.debug("collecting new blocks for %s" %
                                  included_file)
                    try:
                        new_blocks = self._load_included_file(
                            included_file, iterator=iterator)
                    except AnsibleError as e:
                        for host in included_file._hosts:
                            iterator.mark_host_failed(host)
                        display.warning(str(e))
                        continue

                    for new_block in new_blocks:
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            task=included_file._task)
                        final_block = new_block.filter_tagged_tasks(
                            play_context, task_vars)
                        for host in hosts_left:
                            if host in included_file._hosts:
                                all_blocks[host].append(final_block)
                    display.debug("done collecting new blocks for %s" %
                                  included_file)

                display.debug(
                    "adding all collected blocks from %d included file(s) to iterator"
                    % len(included_files))
                for host in hosts_left:
                    iterator.add_tasks(host, all_blocks[host])
                display.debug("done adding collected blocks to iterator")

            # pause briefly so we don't spin lock
            time.sleep(0.001)

        # collect all the final results
        results = self._wait_on_pending_results(iterator)

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered
        return super(StrategyModule, self).run(iterator, play_context, result)
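
The "fair" host selection described in the docstring boils down to a wrapping cursor: remember where the last task was handed out, advance modulo the host count, and stop after one full lap. In isolation:

hosts = ['alpha', 'beta', 'gamma']
last_host = 0           # persists across outer-loop iterations

starting_host = last_host
while True:
    host = hosts[last_host]
    print('offering work to %s' % host)

    # advance, wrapping past the end of the list
    last_host += 1
    if last_host > len(hosts) - 1:
        last_host = 0

    # full lap completed without finding more work: stop
    if last_host == starting_host:
        break
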
Exemple #50
0
    def run(self):

        super(PlaybookCLI, self).run()

        # Manage passwords
        sshpass = None
        becomepass = None
        vault_pass = None
        passwords = {}

        # don't deal with privilege escalation or passwords when we don't need to
        if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
            self.normalize_become_options()
            (sshpass, becomepass) = self.ask_passwords()
            passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        loader = DataLoader()

        if self.options.vault_password_file:
            # read vault_pass from a file
            vault_pass = CLI.read_vault_password_file(
                self.options.vault_password_file, loader=loader)
            loader.set_vault_password(vault_pass)
        elif self.options.ask_vault_pass:
            vault_pass = self.ask_vault_passwords()[0]
            loader.set_vault_password(vault_pass)

        # initial error check, to make sure all specified playbooks are accessible
        # before we start running anything through the playbook executor
        for playbook in self.args:
            if not os.path.exists(playbook):
                raise AnsibleError("the playbook: %s could not be found" %
                                   playbook)
            if not (os.path.isfile(playbook)
                    or stat.S_ISFIFO(os.stat(playbook).st_mode)):
                raise AnsibleError(
                    "the playbook: %s does not appear to be a file" % playbook)

        # create the variable manager, which will be shared throughout
        # the code, ensuring a consistent view of global variables
        variable_manager = VariableManager()
        variable_manager.extra_vars = load_extra_vars(loader=loader,
                                                      options=self.options)

        # create the inventory, and filter it based on the subset specified (if any)
        inventory = Inventory(loader=loader,
                              variable_manager=variable_manager,
                              host_list=self.options.inventory)
        variable_manager.set_inventory(inventory)

        # Note: slightly wrong, this is written so that implicit localhost
        # (which is not returned in list_hosts()) is taken into account for
        # warning if inventory is empty.  But it can't be taken into account for
        # checking if limit doesn't match any hosts.  Instead we don't worry about
        # limit if only implicit localhost was in inventory to start with.
        #
        # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
        no_hosts = False
        if len(inventory.list_hosts()) == 0:
            # Empty inventory
            display.warning(
                "provided hosts list is empty, only localhost is available")
            no_hosts = True
        inventory.subset(self.options.subset)
        if len(inventory.list_hosts()) == 0 and no_hosts is False:
            # Invalid limit
            raise AnsibleError("Specified --limit does not match any hosts")

        # create the playbook executor, which manages running the plays via a task queue manager
        pbex = PlaybookExecutor(playbooks=self.args,
                                inventory=inventory,
                                variable_manager=variable_manager,
                                loader=loader,
                                options=self.options,
                                passwords=passwords)

        results = pbex.run()

        if isinstance(results, list):
            for p in results:

                display.display('\nplaybook: %s' % p['playbook'])
                i = 1
                for play in p['plays']:
                    if play.name:
                        playname = play.name
                    else:
                        playname = '#' + str(i)

                    msg = "\n  PLAY: %s" % (playname)
                    mytags = set()
                    if self.options.listtags and play.tags:
                        mytags = mytags.union(set(play.tags))
                        msg += '    TAGS: [%s]' % (','.join(mytags))

                    if self.options.listhosts:
                        playhosts = set(inventory.get_hosts(play.hosts))
                        msg += "\n    pattern: %s\n    hosts (%d):" % (
                            play.hosts, len(playhosts))
                        for host in playhosts:
                            msg += "\n      %s" % host

                    display.display(msg)

                    if self.options.listtags or self.options.listtasks:
                        taskmsg = '    tasks:'

                        for block in play.compile():
                            if not block.has_tasks():
                                continue

                            j = 1
                            for task in block.block:
                                taskmsg += "\n      %s" % task
                                if self.options.listtags and task.tags:
                                    taskmsg += "    TAGS: [%s]" % ','.join(
                                        mytags.union(set(task.tags)))
                                j = j + 1

                        display.display(taskmsg)

                    i = i + 1
            return 0
        else:
            return results
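
The pre-flight check in this run() accepts either a regular file or a FIFO, so a playbook can be supplied via process substitution. The test in isolation (a sketch, not the original helper):

import os
import stat

def looks_like_playbook(path):
    if not os.path.exists(path):
        return False
    st_mode = os.stat(path).st_mode
    # a regular file, or a named pipe such as bash's <(...) substitution
    return os.path.isfile(path) or stat.S_ISFIFO(st_mode)

print(looks_like_playbook('site.yml'))
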
Exemple #51
0
    def parse_inventory(self, host_list):

        if isinstance(host_list, string_types):
            if "," in host_list:
                host_list = host_list.split(",")
                host_list = [h for h in host_list if h and h.strip()]

        self.parser = None

        # Always create the 'all' and 'ungrouped' groups, even if host_list is
        # empty: in this case we will subsequently add the implicit 'localhost' to it.

        ungrouped = Group('ungrouped')
        all = Group('all')
        all.add_child_group(ungrouped)

        self.groups = dict(all=all, ungrouped=ungrouped)

        if host_list is None:
            pass
        elif isinstance(host_list, list):
            for h in host_list:
                try:
                    (host, port) = parse_address(h, allow_ranges=False)
                except AnsibleError as e:
                    display.vvv(
                        "Unable to parse address from hostname, leaving unchanged: %s"
                        % to_unicode(e))
                    host = h
                    port = None
                all.add_host(Host(host, port))
        elif self._loader.path_exists(host_list):
            # TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory plugins'
            if self.is_directory(host_list):
                # Ensure basedir is inside the directory
                host_list = os.path.join(self.host_list, "")
                self.parser = InventoryDirectory(loader=self._loader,
                                                 groups=self.groups,
                                                 filename=host_list)
            else:
                self.parser = get_file_parser(host_list, self.groups,
                                              self._loader)
                vars_loader.add_directory(self.basedir(), with_subdir=True)

            if not self.parser:
                # should never happen, but JIC
                raise AnsibleError(
                    "Unable to parse %s as an inventory source" % host_list)
        else:
            display.warning("Host file not found: %s" % to_unicode(host_list))

        self._vars_plugins = [x for x in vars_loader.all(self)]

        # set group vars from group_vars/ files and vars plugins
        for g in self.groups:
            group = self.groups[g]
            group.vars = combine_vars(group.vars,
                                      self.get_group_variables(group.name))

        # set host vars from host_vars/ files and vars plugins
        for host in self.get_hosts():
            host.vars = combine_vars(host.vars,
                                     self.get_host_variables(host.name))
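
The comma-splitting and address-parsing fallback above can be shown without Ansible; `split_host_port` below is a deliberately simplified, hypothetical stand-in for `parse_address` (it handles only `host` and `host:port`, not ranges or bracketed IPv6):

# Simplified sketch of the host_list normalization in parse_inventory.
def split_host_port(entry):
    # Hypothetical stand-in for parse_address(); no ranges, no IPv6.
    if ':' in entry:
        host, _, port = entry.rpartition(':')
        if port.isdigit():
            return host, int(port)
    return entry, None

def normalize_host_list(host_list):
    if isinstance(host_list, str) and ',' in host_list:
        host_list = [h.strip() for h in host_list.split(',') if h.strip()]
    return [split_host_port(h) for h in (host_list or [])]

print(normalize_host_list('web1,web2:2222,db1'))
# [('web1', None), ('web2', 2222), ('db1', None)]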
Example #52
    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        module = module_loader._load_module_source(
            self._task.action, module_loader.find_plugin(self._task.action))
        if not getattr(module, 'USE_PERSISTENT_CONNECTION', False):
            return super(ActionModule, self).run(task_vars=task_vars)

        socket_path = None

        if self._play_context.connection == 'local':
            provider = load_provider(junos_provider_spec, self._task.args)
            pc = copy.deepcopy(self._play_context)
            pc.network_os = 'junos'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr

            if provider['transport'] == 'cli' and self._task.action not in CLI_SUPPORTED_MODULES:
                return {
                    'failed': True,
                    'msg': "Transport type '%s' is not valid for '%s' module. "
                           "Please see https://docs.ansible.com/ansible/latest/network/user_guide/platform_junos.html"
                           % (provider['transport'], self._task.action)
                }

            if self._task.action == 'junos_netconf' or \
                    (provider['transport'] == 'cli' and self._task.action == 'junos_command'):
                pc.connection = 'network_cli'
                pc.port = int(provider['port'] or self._play_context.port or 22)
            else:
                pc.connection = 'netconf'
                pc.port = int(provider['port'] or self._play_context.port or 830)

            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            pc.timeout = int(provider['timeout']) if provider['timeout'] else None

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

            if connection._play_context.timeout is None:
                connection._play_context.timeout = connection.get_option('persistent_command_timeout')

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {
                    'failed': True,
                    'msg': 'unable to open shell. Please see: '
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'
                }

            task_vars['ansible_socket'] = socket_path
        elif self._play_context.connection in ('netconf', 'network_cli'):
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning(
                    'provider is unnecessary when using %s and will be ignored'
                    % self._play_context.connection)
                del self._task.args['provider']

            if (self._play_context.connection == 'network_cli' and self._task.action not in CLI_SUPPORTED_MODULES) or \
                    (self._play_context.connection == 'netconf' and self._task.action == 'junos_netconf'):
                return {
                    'failed': True,
                    'msg': "Connection type '%s' is not valid for '%s' module. "
                           "Please see https://docs.ansible.com/ansible/latest/network/user_guide/platform_junos.html"
                           % (self._play_context.connection, self._task.action)
                }

        if (self._play_context.connection == 'local' and pc.connection == 'network_cli') or \
                self._play_context.connection == 'network_cli':
            # make sure we are in the right cli context, which should be
            # enable mode and not config mode
            if socket_path is None:
                socket_path = self._connection.socket_path

            conn = Connection(socket_path)
            out = conn.get_prompt()
            while to_text(out, errors='surrogate_then_replace').strip().endswith('#'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                conn.send_command('exit')
                out = conn.get_prompt()

        result = super(ActionModule, self).run(None, task_vars)
        return result
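
The connection-plugin and port selection in the local branch reduces to a small pure function; this sketch mirrors it with plain arguments (the `pick_junos_connection` name is an assumption; the defaults of 22 for network_cli and 830 for netconf come from the code above):

# Sketch of the junos connection-plugin and default-port selection.
def pick_junos_connection(action, transport, provider_port=None, play_port=None):
    if action == 'junos_netconf' or (transport == 'cli' and action == 'junos_command'):
        return 'network_cli', int(provider_port or play_port or 22)
    return 'netconf', int(provider_port or play_port or 830)

print(pick_junos_connection('junos_command', 'cli'))     # ('network_cli', 22)
print(pick_junos_connection('junos_config', 'netconf'))  # ('netconf', 830)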
Example #53
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        # task_vars = self._job_vars.copy()
        task_vars = self._job_vars

        loop_var = 'item'
        index_var = None
        label = None
        loop_pause = 0
        templar = Templar(loader=self._loader,
                          shared_loader_obj=self._shared_loader_obj,
                          variables=self._job_vars)

        # FIXME: move this to the object itself to allow post_validate to take care of templating (loop_control.post_validate)
        if self._task.loop_control:
            loop_var = templar.template(self._task.loop_control.loop_var)
            index_var = templar.template(self._task.loop_control.index_var)
            loop_pause = templar.template(self._task.loop_control.pause)

            # This may be 'None', so it is templated below after we ensure a value and an item is assigned
            label = self._task.loop_control.label

        # ensure we always have a label
        if label is None:
            label = '{{' + loop_var + '}}'

        if loop_var in task_vars:
            display.warning(
                u"The loop variable '%s' is already in use. "
                u"You should set the `loop_var` value in the `loop_control` option for the task"
                u" to something else to avoid variable collisions and unexpected behavior."
                % loop_var)

        ran_once = False
        if self._task.loop_with:
            # Only squash with 'with_:', not with 'loop:'; this 'magic' squashing can be removed once with_ loops are deprecated
            items = self._squash_items(items, loop_var, task_vars)

        for item_index, item in enumerate(items):
            task_vars[loop_var] = item
            if index_var:
                task_vars[index_var] = item_index

            # Update template vars to reflect current loop iteration
            templar.set_available_variables(task_vars)

            # pause between loop iterations
            if loop_pause and ran_once:
                try:
                    time.sleep(float(loop_pause))
                except ValueError as e:
                    raise AnsibleError(
                        'Invalid pause value: %s, produced error: %s' %
                        (loop_pause, to_native(e)))
            else:
                ran_once = True

            try:
                tmp_task = self._task.copy(exclude_parent=True,
                                           exclude_tasks=True)
                tmp_task._parent = self._task._parent
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=to_text(e)))
                continue

            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context,
                                                      self._play_context)
            res = self._execute(variables=task_vars)
            task_fields = self._task.dump_attrs()
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context,
                                                      self._play_context)

            # now update the result with the item info, and append the result
            # to the list of results
            res[loop_var] = item
            if index_var:
                res[index_var] = item_index
            res['_ansible_item_result'] = True
            res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')

            # gets templated here unlike rest of loop_control fields, depends on loop_var above
            res['_ansible_item_label'] = templar.template(label, cache=False)

            self._rslt_q.put(
                TaskResult(
                    self._host.name,
                    self._task._uuid,
                    res,
                    task_fields=task_fields,
                ),
                block=False,
            )
            results.append(res)
            del task_vars[loop_var]

        return results
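
Stripped of the task/play-context swapping and result queuing, the loop driver follows this shape; a sketch with a hypothetical `execute` callable in place of `self._execute`:

import time

# Sketch of the per-item loop above: inject the loop variable, run the
# task, annotate the result, and pause between iterations (never before
# the first one).
def run_loop(items, execute, task_vars, loop_var='item', index_var=None, pause=0):
    results = []
    ran_once = False
    for index, item in enumerate(items):
        if pause and ran_once:
            time.sleep(float(pause))
        ran_once = True
        task_vars[loop_var] = item
        if index_var:
            task_vars[index_var] = index
        res = execute(task_vars)
        res[loop_var] = item
        if index_var:
            res[index_var] = index
        results.append(res)
    task_vars.pop(loop_var, None)
    return results

print(run_loop(['a', 'b'], lambda v: {'changed': True}, {}, index_var='idx'))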
Example #54
    def _execute_module(self,
                        module_name=None,
                        module_args=None,
                        tmp=None,
                        task_vars=None,
                        persist_files=False,
                        delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError(
                    "check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # Get the connection user for permission checks
        remote_user = task_vars.get(
            'ansible_ssh_user') or self._play_context.remote_user

        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity

        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__

        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get(
            'ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)

        # let module know about filesystems that selinux treats specially
        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS

        (module_style, shebang, module_data,
         module_path) = self._configure_module(module_name=module_name,
                                               module_args=module_args,
                                               task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" %
                               module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp:
            remote_module_filename = self._connection._shell.get_remote_filename(
                module_path)
            remote_module_path = self._connection._shell.join_path(
                tmp, remote_module_filename)
            if module_style in ('old', 'non_native_want_json', 'binary'):
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" %
                          remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        self._fixup_perms(tmp, remote_user, recursive=True)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(
            environment_string,
            shebang,
            cmd,
            arg_path=args_file_path,
            rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd,
                                              sudoable=sudoable,
                                              in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd,
                                                             sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning(
                        'Error deleting remote temporary files (rc: {0}, stderr: {1})'
                        .format(
                            tmp_rm_res.get('rc'),
                            tmp_rm_res.get('stderr',
                                           'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" %
                      (module_name, module_args))
        return data
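
The block of `_ansible_*` assignments at the top of `_execute_module` is plain dict decoration; a sketch of that step in isolation (the helper name and defaults are assumptions):

# Sketch of the internal-flag injection done before the module is
# transferred to the remote host.
def decorate_module_args(module_args, check_mode=False, no_log=False,
                         debug=False, diff=False, verbosity=0, version='2.x'):
    args = dict(module_args)  # do not mutate the caller's dict
    args['_ansible_check_mode'] = check_mode
    args['_ansible_no_log'] = no_log
    args['_ansible_debug'] = debug
    args['_ansible_diff'] = diff
    args['_ansible_verbosity'] = verbosity
    args['_ansible_version'] = version
    return args

print(decorate_module_args({'path': '/tmp/x'}, check_mode=True))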
    def _do_handler_run(self,
                        handler,
                        handler_name,
                        iterator,
                        play_context,
                        notified_hosts=None):

        # FIXME: need to use iterator.get_failed_hosts() instead?
        #if not len(self.get_hosts_remaining(iterator._play)):
        #    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
        #    result = False
        #    break
        saved_name = handler.name
        handler.name = handler_name
        self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
        handler.name = saved_name

        if notified_hosts is None:
            notified_hosts = self._notified_handlers[handler._uuid]

        run_once = False
        try:
            action = action_loader.get(handler.action, class_only=True)
            if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                run_once = True
        except KeyError:
            # we don't care here, because the action may simply not have a
            # corresponding action plugin
            pass

        host_results = []
        for host in notified_hosts:
            if not handler.has_triggered(host) and (
                    not iterator.is_failed(host)
                    or play_context.force_handlers):
                task_vars = self._variable_manager.get_vars(
                    loader=self._loader,
                    play=iterator._play,
                    host=host,
                    task=handler)
                self.add_tqm_variables(task_vars, play=iterator._play)
                self._queue_task(host, handler, task_vars, play_context)
                if run_once:
                    break

        # collect the results from the handler run
        host_results = self._wait_on_pending_results(iterator)

        try:
            included_files = IncludedFile.process_include_results(
                host_results,
                self._tqm,
                iterator=iterator,
                inventory=self._inventory,
                loader=self._loader,
                variable_manager=self._variable_manager)
        except AnsibleError as e:
            return False

        result = True
        if len(included_files) > 0:
            for included_file in included_files:
                try:
                    new_blocks = self._load_included_file(included_file,
                                                          iterator=iterator,
                                                          is_handler=True)
                    # for every task in each block brought in by the include, add the list
                    # of hosts which included the file to the notified_handlers dict
                    for block in new_blocks:
                        iterator._play.handlers.append(block)
                        iterator.cache_block_tasks(block)
                        for task in block.block:
                            result = self._do_handler_run(
                                handler=task,
                                handler_name=None,
                                iterator=iterator,
                                play_context=play_context,
                                notified_hosts=included_file._hosts[:],
                            )
                            if not result:
                                break
                except AnsibleError as e:
                    for host in included_file._hosts:
                        iterator.mark_host_failed(host)
                        self._tqm._failed_hosts[host.name] = True
                    display.warning(str(e))
                    continue

        # wipe the notification list
        self._notified_handlers[handler._uuid] = []
        display.debug("done running handlers, result is: %s" % result)
        return result
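
The dispatch loop in the middle of `_do_handler_run` reduces to: queue the handler once per notified host that has not failed (unless handlers are forced), and stop after the first host when run_once applies. A sketch:

# Sketch of the handler dispatch loop above.
def dispatch_handler(notified_hosts, failed_hosts, queue_task,
                     run_once=False, force_handlers=False):
    for host in notified_hosts:
        if host in failed_hosts and not force_handlers:
            continue
        queue_task(host)
        if run_once:
            break

dispatch_handler(['web1', 'web2'], {'web2'}, lambda h: print('queued', h))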
Example #56
    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        socket_path = None

        if self._play_context.connection == 'network_cli':
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnecessary when using network_cli and will be ignored')
                del self._task.args['provider']
            if self._task.args.get('transport'):
                display.warning('transport is unnecessary when using network_cli and will be ignored')
                del self._task.args['transport']
        elif self._play_context.connection == 'local':
            provider = load_provider(eos_provider_spec, self._task.args)
            transport = provider['transport'] or 'cli'

            display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)

            if transport == 'cli':
                pc = copy.deepcopy(self._play_context)
                pc.connection = 'network_cli'
                pc.network_os = 'eos'
                pc.remote_addr = provider['host'] or self._play_context.remote_addr
                pc.port = int(provider['port'] or self._play_context.port or 22)
                pc.remote_user = provider['username'] or self._play_context.connection_user
                pc.password = provider['password'] or self._play_context.password
                pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
                pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
                pc.become = provider['authorize'] or False
                if pc.become:
                    pc.become_method = 'enable'
                pc.become_pass = provider['auth_pass']

                display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
                connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

                socket_path = connection.run()
                display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
                if not socket_path:
                    return {'failed': True,
                            'msg': 'unable to open shell. Please see: ' +
                                   'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

                task_vars['ansible_socket'] = socket_path

            else:
                self._task.args['provider'] = ActionModule.eapi_implementation(provider, self._play_context)
        else:
            return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}

        if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
            # make sure we are in the right cli context, which should be
            # enable mode and not config mode
            if socket_path is None:
                socket_path = self._connection.socket_path

            conn = Connection(socket_path)
            out = conn.get_prompt()
            while '(config' in to_text(out, errors='surrogate_then_replace').strip():
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                conn.send_command('abort')
                out = conn.get_prompt()

        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
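
Both this module and the junos example above normalize the CLI context by reading the prompt and backing out until the device is no longer in configuration mode. A standalone sketch with a fake connection object:

# Sketch of the prompt-normalization loop: keep sending the exit/abort
# command while the prompt still looks like configuration mode.
class FakeConn:
    def __init__(self, prompts):
        self.prompts = list(prompts)
    def get_prompt(self):
        return self.prompts[0]
    def send_command(self, cmd):
        self.prompts.pop(0)

def leave_config_mode(conn, exit_cmd='abort'):
    out = conn.get_prompt()
    while '(config' in out.strip():
        conn.send_command(exit_cmd)
        out = conn.get_prompt()
    return out

print(leave_config_mode(FakeConn(['sw(config-if)#', 'sw(config)#', 'sw#'])))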
Example #57
    def _file_transport_command(self, in_path, out_path, sftp_action):
        # scp and sftp require square brackets for IPv6 addresses, but
        # accept them for hostnames and IPv4 addresses too.
        host = '[%s]' % self.host

        # since this can be a non-bool now, we need to handle it correctly
        scp_if_ssh = C.DEFAULT_SCP_IF_SSH
        if not isinstance(scp_if_ssh, bool):
            scp_if_ssh = scp_if_ssh.lower()
            if scp_if_ssh in BOOLEANS:
                scp_if_ssh = boolean(scp_if_ssh)
            elif scp_if_ssh != 'smart':
                raise AnsibleOptionsError(
                    'scp_if_ssh needs to be one of [smart|True|False]')

        # create a list of commands to use based on config options
        methods = ['sftp']
        if scp_if_ssh == 'smart':
            methods.append('scp')
        elif scp_if_ssh:
            methods = ['scp']

        success = False
        res = None
        for method in methods:
            if method == 'sftp':
                cmd = self._build_command('sftp', to_bytes(host))
                in_data = u"{0} {1} {2}\n".format(sftp_action,
                                                  pipes.quote(in_path),
                                                  pipes.quote(out_path))
            elif method == 'scp':
                if sftp_action == 'get':
                    cmd = self._build_command(
                        'scp', u'{0}:{1}'.format(host, pipes.quote(in_path)),
                        out_path)
                else:
                    cmd = self._build_command(
                        'scp', in_path,
                        u'{0}:{1}'.format(host, pipes.quote(out_path)))
                in_data = None

            in_data = to_bytes(in_data, nonstring='passthru')
            (returncode, stdout, stderr) = self._run(cmd,
                                                     in_data,
                                                     checkrc=False)
            # Check the return code and rollover to next method if failed
            if returncode == 0:
                success = True
                break
            else:
                # If not in smart mode, the data will be printed by the raise below
                if scp_if_ssh == 'smart':
                    display.warning(msg='%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
                    display.debug(msg='%s' % to_native(stdout))
                    display.debug(msg='%s' % to_native(stderr))
                res = (returncode, stdout, stderr)

        if not success:
            raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}"\
                    .format(to_native(out_path), to_native(res[1]), to_native(res[2])))
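
The 'smart' behaviour is a try-in-order loop that rolls over to the next method on a non-zero return code; a standalone sketch where the hypothetical `run_transfer` callable stands in for `self._run`:

# Sketch of the smart sftp -> scp fallback above.
def transfer_with_fallback(run_transfer, scp_if_ssh='smart'):
    methods = ['sftp']
    if scp_if_ssh == 'smart':
        methods.append('scp')
    elif scp_if_ssh:
        methods = ['scp']
    last = None
    for method in methods:
        rc, stdout, stderr = run_transfer(method)
        if rc == 0:
            return method
        last = (rc, stdout, stderr)
    raise RuntimeError('all transfer methods failed: %r' % (last,))

# sftp fails, scp succeeds:
print(transfer_with_fallback(lambda m: (0, b'', b'') if m == 'scp' else (1, b'', b'no sftp')))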
Example #58
    def run(self):

        super(ConsoleCLI, self).run()

        sshpass = None
        becomepass = None
        vault_pass = None

        # hosts
        if len(self.args) != 1:
            self.pattern = 'all'
        else:
            self.pattern = self.args[0]
        self.options.cwd = self.pattern

        # dynamically add modules as commands
        self.modules = self.list_modules()
        for module in self.modules:
            setattr(
                self,
                'do_' + module,
                lambda arg, module=module: self.default(module + ' ' + arg))
            setattr(self,
                    'help_' + module,
                    lambda module=module: self.helpdefault(module))

        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        self.loader = DataLoader()

        if self.options.vault_password_file:
            # read vault_pass from a file
            vault_pass = CLI.read_vault_password_file(
                self.options.vault_password_file, loader=self.loader)
            self.loader.set_vault_password(vault_pass)
        elif self.options.ask_vault_pass:
            vault_pass = self.ask_vault_passwords()
            self.loader.set_vault_password(vault_pass)

        self.variable_manager = VariableManager()
        self.inventory = Inventory(loader=self.loader,
                                   variable_manager=self.variable_manager,
                                   host_list=self.options.inventory)
        self.variable_manager.set_inventory(self.inventory)

        no_hosts = False
        if len(self.inventory.list_hosts()) == 0:
            # Empty inventory
            no_hosts = True
            display.warning(
                "provided hosts list is empty, only localhost is available")

        self.inventory.subset(self.options.subset)
        hosts = self.inventory.list_hosts(self.pattern)
        if len(hosts) == 0 and not no_hosts:
            raise AnsibleError(
                "Specified hosts and/or --limit does not match any hosts")

        self.groups = self.inventory.list_groups()
        self.hosts = [x.name for x in hosts]

        # This hack is to work around readline issues on a mac:
        #  http://stackoverflow.com/a/7116997/541202
        if 'libedit' in readline.__doc__:
            readline.parse_and_bind("bind ^I rl_complete")
        else:
            readline.parse_and_bind("tab: complete")

        histfile = os.path.join(os.path.expanduser("~"),
                                ".ansible-console_history")
        try:
            readline.read_history_file(histfile)
        except IOError:
            pass

        atexit.register(readline.write_history_file, histfile)
        self.set_prompt()
        self.cmdloop()
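
The readline setup at the end is a reusable pattern on its own: bind tab completion (macOS libedit uses a different binding syntax), then load and persist a history file. A sketch, assuming a Unix Python where the readline module is available:

import atexit
import os
import readline

# Sketch of the readline history/completion setup above.
def setup_readline(history_name='.sketch_history'):
    if readline.__doc__ and 'libedit' in readline.__doc__:
        readline.parse_and_bind('bind ^I rl_complete')  # macOS libedit
    else:
        readline.parse_and_bind('tab: complete')
    histfile = os.path.join(os.path.expanduser('~'), history_name)
    try:
        readline.read_history_file(histfile)
    except IOError:
        pass  # no history file yet
    atexit.register(readline.write_history_file, histfile)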
Example #59
    def reset(self):
        display.warning("Reset is not implemented for this connection")
Example #60
    def run(self, tmp=None, task_vars=None):

        self._supports_check_mode = True
        self._supports_async = True

        if self._play_context.check_mode:
            return dict(changed=True, elapsed=0, rebooted=True)

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if result.get('skipped', False) or result.get('failed', False):
            return result

        # Handle deprecated timeout parameters and their aliases
        deprecated_args = {
            'shutdown_timeout': '2.5',
            'shutdown_timeout_sec': '2.5',
        }
        for arg, version in deprecated_args.items():
            if self._task.args.get(arg) is not None:
                display.warning("Since Ansible %s, %s is no longer used with win_reboot" % (arg, version))

        if self._task.args.get('connect_timeout') is not None:
            connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
        else:
            connect_timeout = int(self._task.args.get('connect_timeout_sec', self.DEFAULT_CONNECT_TIMEOUT))

        if self._task.args.get('reboot_timeout') is not None:
            reboot_timeout = int(self._task.args.get('reboot_timeout', self.DEFAULT_REBOOT_TIMEOUT))
        else:
            reboot_timeout = int(self._task.args.get('reboot_timeout_sec', self.DEFAULT_REBOOT_TIMEOUT))

        if self._task.args.get('pre_reboot_delay') is not None:
            pre_reboot_delay = int(self._task.args.get('pre_reboot_delay', self.DEFAULT_PRE_REBOOT_DELAY))
        else:
            pre_reboot_delay = int(self._task.args.get('pre_reboot_delay_sec', self.DEFAULT_PRE_REBOOT_DELAY))

        if self._task.args.get('post_reboot_delay') is not None:
            post_reboot_delay = int(self._task.args.get('post_reboot_delay', self.DEFAULT_POST_REBOOT_DELAY))
        else:
            post_reboot_delay = int(self._task.args.get('post_reboot_delay_sec', self.DEFAULT_POST_REBOOT_DELAY))

        test_command = str(self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND))
        msg = str(self._task.args.get('msg', self.DEFAULT_REBOOT_MESSAGE))

        # Get current uptime
        try:
            before_uptime = self.get_system_uptime()
        except Exception as e:
            result['failed'] = True
            result['reboot'] = False
            result['msg'] = to_native(e)
            return result

        # Initiate reboot
        display.vvv("rebooting server")
        (rc, stdout, stderr) = self._connection.exec_command('shutdown /r /t %d /c "%s"' % (pre_reboot_delay, msg))

        # Test for "A system shutdown has already been scheduled. (1190)" and handle it gracefully
        if rc == 1190:
            display.warning('A scheduled reboot was pre-empted by Ansible.')

            # Try to abort (this may fail if it was already aborted)
            (rc, stdout1, stderr1) = self._connection.exec_command('shutdown /a')

            # Initiate reboot again
            (rc, stdout2, stderr2) = self._connection.exec_command('shutdown /r /t %d' % pre_reboot_delay)
            stdout += stdout1 + stdout2
            stderr += stderr1 + stderr2

        if rc != 0:
            result['failed'] = True
            result['rebooted'] = False
            result['msg'] = "Shutdown command failed, error text was %s" % stderr
            return result

        start = datetime.now()
        # Get the original connection_timeout option var so it can be reset after
        connection_timeout_orig = None
        try:
            connection_timeout_orig = self._connection.get_option('connection_timeout')
        except AnsibleError:
            display.debug("win_reboot: connection_timeout connection option has not been set")

        try:
            # keep on checking system uptime with short connection responses
            def check_uptime():
                display.vvv("attempting to get system uptime")

                # override connection timeout from defaults to custom value
                try:
                    self._connection.set_options(direct={"connection_timeout": connect_timeout})
                    self._connection._reset()
                except AttributeError:
                    display.warning("Connection plugin does not allow the connection timeout to be overridden")

                # try to get uptime; any exception here just means the host
                # is not reachable yet, and the retry wrapper will try again
                current_uptime = self.get_system_uptime()

                if current_uptime == before_uptime:
                    raise Exception("uptime has not changed")

            self.do_until_success_or_timeout(check_uptime, reboot_timeout, what_desc="reboot uptime check success")

            # reset the connection to clear the custom connection timeout
            try:
                self._connection.set_options(direct={"connection_timeout": connection_timeout_orig})
                self._connection._reset()
            except (AnsibleError, AttributeError):
                display.debug("Failed to reset connection_timeout back to default")

            # finally run test command to ensure everything is working
            def run_test_command():
                display.vvv("attempting post-reboot test command '%s'" % test_command)
                (rc, stdout, stderr) = self._connection.exec_command(test_command)

                if rc != 0:
                    raise Exception('test command failed')

            # FUTURE: add a stability check (system must remain up for N seconds) to deal with self-multi-reboot updates

            self.do_until_success_or_timeout(run_test_command, reboot_timeout, what_desc="post-reboot test command success")

            result['rebooted'] = True
            result['changed'] = True

        except TimedOutException as toex:
            result['failed'] = True
            result['rebooted'] = True
            result['msg'] = to_native(toex)

        if post_reboot_delay != 0:
            display.vvv("win_reboot: waiting an additional %d seconds" % post_reboot_delay)
            time.sleep(post_reboot_delay)

        elapsed = datetime.now() - start
        result['elapsed'] = elapsed.seconds

        return result
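
`do_until_success_or_timeout` itself is not shown above; a minimal sketch of the retry loop it implies (the signature and poll interval are assumptions based on how it is called; TimedOutException matches the exception caught above):

import time
from datetime import datetime, timedelta

class TimedOutException(Exception):
    pass

# Retry fn() until it stops raising, or fail once the deadline passes.
def do_until_success_or_timeout(fn, timeout_sec, what_desc, poll_sec=1):
    deadline = datetime.now() + timedelta(seconds=timeout_sec)
    while datetime.now() < deadline:
        try:
            fn()
            return
        except Exception:
            time.sleep(poll_sec)
    raise TimedOutException('timed out waiting for %s' % what_desc)

do_until_success_or_timeout(lambda: None, 5, 'no-op check')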