Example #1
    def find_plugin(self, name, suffixes=None):
        ''' Find a plugin named name '''

        if not suffixes:
            if self.class_name:
                suffixes = ['.py']
            else:
                suffixes = ['.py', '']

        potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
        for full_name in potential_names:
            if full_name in self._plugin_path_cache:
                return self._plugin_path_cache[full_name]

        found = None
        for path in [
                p for p in self._get_paths() if p not in self._searched_paths
        ]:
            if os.path.isdir(path):
                try:
                    full_paths = (os.path.join(path, f)
                                  for f in os.listdir(path))
                except OSError as e:
                    d = Display()
                    d.warning("Error accessing plugin paths: %s" % str(e))
                for full_path in (f for f in full_paths if os.path.isfile(f)):
                    for suffix in suffixes:
                        if full_path.endswith(suffix):
                            full_name = os.path.basename(full_path)
                            break
                    else:  # Yes, this is a for-else: http://bit.ly/1ElPkyg
                        continue

                    if full_name not in self._plugin_path_cache:
                        self._plugin_path_cache[full_name] = full_path

            self._searched_paths.add(path)
            for full_name in potential_names:
                if full_name in self._plugin_path_cache:
                    return self._plugin_path_cache[full_name]

        # if nothing is found, try finding alias/deprecated
        if not name.startswith('_'):
            for alias_name in ('_%s' % n for n in potential_names):
                # We've already cached all the paths at this point
                if alias_name in self._plugin_path_cache:
                    if not os.path.islink(self._plugin_path_cache[alias_name]):
                        d = Display()
                        d.warning('%s has been deprecated, which means '
                                  'it is kept for backwards compatibility '
                                  'but usage is discouraged. The module '
                                  'documentation details page may explain '
                                  'more about this rationale.' %
                                  name.lstrip('_'))
                    return self._plugin_path_cache[alias_name]

        return None
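
The suffix loop above relies on Python's for/else (see the inline comment). As a minimal standalone sketch, assuming a made-up file name: the else branch runs only when the loop finishes without hitting break.

# A minimal sketch of Python's for/else as used in find_plugin() above:
# the else branch runs only when the loop completes without a break.
for suffix in ['.py', '.yml']:
    if "plugin.txt".endswith(suffix):
        print("matched suffix", suffix)
        break
else:
    print("no suffix matched")  # reached here: the loop never hit break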
Example #2
def test_warning_no_color(capsys, mocker, warning_message):
    warning_message, expected_warning_message = warning_message

    mocker.patch('ansible.utils.color.ANSIBLE_COLOR', False)

    d = Display()
    d.warning(warning_message)
    out, err = capsys.readouterr()
    assert d._warns == {expected_warning_message: 1}
    assert err == expected_warning_message
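
For context, a minimal sketch of what the test above exercises (assuming ansible is installed): Display.warning() writes the formatted message to stderr and tracks it in the internal _warns dict, which is exactly what the assertions check.

# A minimal sketch (assumes ansible is installed): Display.warning() goes to
# stderr with a "[WARNING]:" prefix and is recorded in the _warns dict.
from ansible.utils.display import Display

display = Display()
display.warning("something looks off")  # printed to stderr
print(display._warns)                   # the formatted message is tracked here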
Example #3
    def find_plugin(self, name, suffixes=None):
        ''' Find a plugin named name '''

        if not suffixes:
            if self.class_name:
                suffixes = ['.py']
            else:
                suffixes = ['.py', '']

        potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
        for full_name in potential_names:
            if full_name in self._plugin_path_cache:
                return self._plugin_path_cache[full_name]

        found = None
        for path in [p for p in self._get_paths() if p not in self._searched_paths]:
            if os.path.isdir(path):
                try:
                    full_paths = (os.path.join(path, f) for f in os.listdir(path))
                except OSError as e:
                    d = Display()
                    d.warning("Error accessing plugin paths: %s" % str(e))
                for full_path in (f for f in full_paths if os.path.isfile(f)):
                    for suffix in suffixes:
                        if full_path.endswith(suffix):
                            full_name = os.path.basename(full_path)
                            break
                    else: # Yes, this is a for-else: http://bit.ly/1ElPkyg
                        continue

                    if full_name not in self._plugin_path_cache:
                        self._plugin_path_cache[full_name] = full_path

            self._searched_paths.add(path)
            for full_name in potential_names:
                if full_name in self._plugin_path_cache:
                    return self._plugin_path_cache[full_name]

        # if nothing is found, try finding alias/deprecated
        if not name.startswith('_'):
            for alias_name in ('_%s' % n for n in potential_names):
                # We've already cached all the paths at this point
                if alias_name in self._plugin_path_cache:
                    if not os.path.islink(self._plugin_path_cache[alias_name]):
                        d = Display()
                        d.warning('%s has been deprecated, which means '
                                  'it is kept for backwards compatibility '
                                  'but usage is discouraged. The module '
                                  'documentation details page may explain '
                                  'more about this rationale.' %
                                  name.lstrip('_'))
                    return self._plugin_path_cache[alias_name]

        return None
Example #4
def test_warning(capsys, mocker, warning_message):
    warning_message, expected_warning_message = warning_message

    mocker.patch('ansible.utils.color.ANSIBLE_COLOR', True)
    mocker.patch('ansible.utils.color.parsecolor',
                 return_value=u'1;35')  # value for 'bright purple'

    d = Display()
    d.warning(warning_message)
    out, err = capsys.readouterr()
    assert d._warns == {expected_warning_message: 1}
    assert err == '\x1b[1;35m{0}\x1b[0m\n\x1b[1;35m\x1b[0m'.format(
        expected_warning_message.rstrip('\n'))
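
The expected string in the assertion above is just the warning text wrapped in ANSI SGR escape codes ('1;35' is bright purple) plus a reset. A standalone sketch that rebuilds the same sequence without ansible; the warning text is made up:

# Rebuild the ANSI sequence asserted in the test above.
def colorize(text, code="1;35"):
    # wrap text in an ANSI escape sequence, then reset
    return u"\x1b[{0}m{1}\x1b[0m".format(code, text)

message = "[WARNING]: example warning\n"   # hypothetical warning text
expected = "{0}\n{1}".format(colorize(message.rstrip("\n")), colorize(""))
print(repr(expected))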
Example #5
    def find_plugin(self, name, suffixes=None):
        ''' Find a plugin named name '''

        if not suffixes:
            if self.class_name:
                suffixes = ['.py']
            else:
                suffixes = ['.py', '']

        potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
        for full_name in potential_names:
            if full_name in self._plugin_path_cache:
                return self._plugin_path_cache[full_name]

        found = None
        for path in [
                p for p in self._get_paths() if p not in self._searched_paths
        ]:
            if os.path.isdir(path):
                try:
                    full_paths = (os.path.join(path, f)
                                  for f in os.listdir(path))
                except OSError as e:
                    d = Display()
                    d.warning("Error accessing plugin paths: %s" % str(e))
                for full_path in (f for f in full_paths if os.path.isfile(f)):
                    for suffix in suffixes:
                        if full_path.endswith(suffix):
                            full_name = os.path.basename(full_path)
                            break
                    else:  # Yes, this is a for-else: http://bit.ly/1ElPkyg
                        continue

                    if full_name not in self._plugin_path_cache:
                        self._plugin_path_cache[full_name] = full_path

            self._searched_paths.add(path)
            for full_name in potential_names:
                if full_name in self._plugin_path_cache:
                    return self._plugin_path_cache[full_name]

        # if nothing is found, try finding alias/deprecated
        if not name.startswith('_'):
            for alias_name in ('_%s' % n for n in potential_names):
                # We've already cached all the paths at this point
                if alias_name in self._plugin_path_cache:
                    return self._plugin_path_cache[alias_name]

        return None
Example #6
    def find_plugin(self, name, suffixes=None):
        ''' Find a plugin named name '''

        if not suffixes:
            if self.class_name:
                suffixes = ['.py']
            else:
                suffixes = ['.py', '']

        potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
        for full_name in potential_names:
            if full_name in self._plugin_path_cache:
                return self._plugin_path_cache[full_name]

        found = None
        for path in [p for p in self._get_paths() if p not in self._searched_paths]:
            if os.path.isdir(path):
                try:
                    full_paths = (os.path.join(path, f) for f in os.listdir(path))
                except OSError as e:
                    d = Display()
                    d.warning("Error accessing plugin paths: %s" % str(e))
                for full_path in (f for f in full_paths if os.path.isfile(f)):
                    for suffix in suffixes:
                        if full_path.endswith(suffix):
                            full_name = os.path.basename(full_path)
                            break
                    else: # Yes, this is a for-else: http://bit.ly/1ElPkyg
                        continue

                    if full_name not in self._plugin_path_cache:
                        self._plugin_path_cache[full_name] = full_path

            self._searched_paths.add(path)
            for full_name in potential_names:
                if full_name in self._plugin_path_cache:
                    return self._plugin_path_cache[full_name]

        # if nothing is found, try finding alias/deprecated
        if not name.startswith('_'):
            for alias_name in ('_%s' % n for n in potential_names):
                # We've already cached all the paths at this point
                if alias_name in self._plugin_path_cache:
                    return self._plugin_path_cache[alias_name]

        return None
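
The repeated lookups above hinge on two pieces of state: _plugin_path_cache (file basename to full path) and _searched_paths (directories already scanned). A stripped-down sketch of that memoization, independent of the real PluginLoader class:

import os

# Remember scanned directories and cache file paths by basename,
# mirroring the idea behind find_plugin() above (not the real API).
_plugin_path_cache = {}   # file basename -> full path
_searched_paths = set()   # directories already scanned

def scan_path(path):
    """Scan a directory once and remember every regular file it contains."""
    if path in _searched_paths or not os.path.isdir(path):
        return
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        if os.path.isfile(full_path):
            _plugin_path_cache.setdefault(name, full_path)
    _searched_paths.add(path)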
Example #7
JINJA2_OVERRIDE = '#jinja2:'

from jinja2 import __version__ as j2_version

USE_JINJA2_NATIVE = False
if C.DEFAULT_JINJA2_NATIVE:
    try:
        from jinja2.nativetypes import NativeEnvironment as Environment
        from ansible.template.native_helpers import ansible_native_concat as j2_concat
        from ansible.template.native_helpers import NativeJinjaText
        USE_JINJA2_NATIVE = True
    except ImportError:
        from jinja2 import Environment
        from jinja2.utils import concat as j2_concat
        display.warning(
            'jinja2_native requires Jinja 2.10 and above. '
            'Version detected: %s. Falling back to default.' % j2_version
        )
else:
    from jinja2 import Environment
    from jinja2.utils import concat as j2_concat


JINJA2_BEGIN_TOKENS = frozenset(('variable_begin', 'block_begin', 'comment_begin', 'raw_begin'))
JINJA2_END_TOKENS = frozenset(('variable_end', 'block_end', 'comment_end', 'raw_end'))


RANGE_TYPE = type(range(0))


def generate_ansible_template_vars(path, dest_path=None):
    b_path = to_bytes(path)
Example #8
class Grapher(object):
    """
    Main class to make the graph
    """
    DEFAULT_GRAPH_ATTR = {
        "ratio": "fill",
        "rankdir": "LR",
        "concentrate": "true",
        "ordering": "in"
    }
    DEFAULT_EDGE_ATTR = {"sep": "10", "esep": "5"}

    def __init__(self,
                 data_loader,
                 inventory_manager,
                 variable_manager,
                 playbook_filename,
                 options,
                 graph=None):
        """
        Main grapher responsible for parsing the playbook and drawing the graph
        :param data_loader:
        :type data_loader: ansible.parsing.dataloader.DataLoader
        :param inventory_manager:
        :type inventory_manager: ansible.inventory.manager.InventoryManager
        :param variable_manager:
        :type variable_manager: ansible.vars.manager.VariableManager
        :param options: Command line options
        :type options: optparse.Values
        :param playbook_filename:
        :type playbook_filename: str
        :param graph:
        :type graph: Digraph
        """
        self.options = options
        self.variable_manager = variable_manager
        self.inventory_manager = inventory_manager
        self.data_loader = data_loader
        self.playbook_filename = playbook_filename
        self.options.output_filename = self.options.output_filename
        self.rendered_file_path = None
        self.display = Display(verbosity=options.verbosity)

        if self.options.tags is None:
            self.options.tags = ["all"]

        if self.options.skip_tags is None:
            self.options.skip_tags = []

        self.graph_representation = GraphRepresentation()

        self.playbook = Playbook.load(self.playbook_filename,
                                      loader=self.data_loader,
                                      variable_manager=self.variable_manager)

        if graph is None:
            self.graph = CustomDigrah(edge_attr=self.DEFAULT_EDGE_ATTR,
                                      graph_attr=self.DEFAULT_GRAPH_ATTR,
                                      format="svg")

    def template(self, data, variables, fail_on_undefined=False):
        """
        Template the data using Jinja. Return data if an error occurs during the templating
        :param fail_on_undefined:
        :type fail_on_undefined: bool
        :param data:
        :type data: Union[str, ansible.parsing.yaml.objects.AnsibleUnicode]
        :param variables:
        :type variables: dict
        :return:
        """
        try:
            templar = Templar(loader=self.data_loader, variables=variables)
            return templar.template(data, fail_on_undefined=fail_on_undefined)
        except AnsibleError as ansible_error:
            # Sometimes we need to export
            if fail_on_undefined:
                raise
            self.display.warning(ansible_error)
            return data

    def make_graph(self):
        """
        Loop through the playbook and make the graph.

        The graph is drawn following this order (https://docs.ansible.com/ansible/2.4/playbooks_reuse_roles.html#using-roles)
        for each play:
            draw pre_tasks
            draw roles
                if include_role_tasks
                    draw role_tasks
            draw tasks
            draw post_tasks
        :return:
        :rtype:
        """

        # the root node
        self.graph.node(self.playbook_filename, style="dotted", id="root_node")

        # loop through the plays
        for play_counter, play in enumerate(self.playbook.get_plays(), 1):

            # the load basedir is relative to the playbook path
            if play._included_path is not None:
                self.data_loader.set_basedir(play._included_path)
            else:
                self.data_loader.set_basedir(self.playbook._basedir)
            self.display.vvv("Loader basedir set to {}".format(
                self.data_loader.get_basedir()))

            play_vars = self.variable_manager.get_vars(play)
            play_hosts = [
                h.get_name() for h in self.inventory_manager.get_hosts(
                    self.template(play.hosts, play_vars))
            ]
            play_name = "Play #{}: {} ({})".format(play_counter,
                                                   clean_name(play.get_name()),
                                                   len(play_hosts))
            play_name = self.template(play_name, play_vars)

            self.display.banner("Graphing " + play_name)

            play_id = "play_" + str(uuid.uuid4())

            self.graph_representation.add_node(play_id)

            with self.graph.subgraph(name=play_name) as play_subgraph:
                color, play_font_color = get_play_colors(play)
                # play node
                play_subgraph.node(play_name,
                                   id=play_id,
                                   style="filled",
                                   shape="box",
                                   color=color,
                                   fontcolor=play_font_color,
                                   tooltip="     ".join(play_hosts))

                # edge from root node to plays
                play_edge_id = "edge_" + str(uuid.uuid4())
                play_subgraph.edge(self.playbook_filename,
                                   play_name,
                                   id=play_edge_id,
                                   style="bold",
                                   label=str(play_counter),
                                   color=color,
                                   fontcolor=color)

                # loop through the pre_tasks
                self.display.v("Graphing pre_tasks...")
                nb_pre_tasks = 0
                for pre_task_block in play.pre_tasks:
                    nb_pre_tasks = self._include_tasks_in_blocks(
                        current_play=play,
                        graph=play_subgraph,
                        parent_node_name=play_name,
                        parent_node_id=play_id,
                        block=pre_task_block,
                        color=color,
                        current_counter=nb_pre_tasks,
                        play_vars=play_vars,
                        node_name_prefix="[pre_task] ")

                # loop through the roles
                self.display.v("Graphing roles...")
                role_number = 0
                for role in play.get_roles():
                    # Don't insert tasks from ``import/include_role``, preventing
                    # duplicate graphing
                    if role.from_include:
                        continue

                    role_number += 1

                    role_name = "[role] " + clean_name(role.get_name())

                    # the role object doesn't inherit the tags from the play. So we add it manually
                    role.tags = role.tags + play.tags

                    role_not_tagged = ""
                    if not role.evaluate_tags(only_tags=self.options.tags,
                                              skip_tags=self.options.skip_tags,
                                              all_vars=play_vars):
                        role_not_tagged = NOT_TAGGED

                    with self.graph.subgraph(name=role_name,
                                             node_attr={}) as role_subgraph:
                        current_counter = role_number + nb_pre_tasks
                        role_id = "role_" + str(uuid.uuid4()) + role_not_tagged
                        role_subgraph.node(role_name, id=role_id)

                        edge_id = "edge_" + str(uuid.uuid4()) + role_not_tagged

                        # edge from play to role
                        role_subgraph.edge(play_name,
                                           role_name,
                                           label=str(current_counter),
                                           color=color,
                                           fontcolor=color,
                                           id=edge_id)

                        self.graph_representation.add_link(play_id, edge_id)
                        self.graph_representation.add_link(edge_id, role_id)

                        # loop through the tasks of the roles
                        if self.options.include_role_tasks:
                            role_tasks_counter = 0
                            for block in role.compile(play):
                                role_tasks_counter = self._include_tasks_in_blocks(
                                    current_play=play,
                                    graph=role_subgraph,
                                    parent_node_name=role_name,
                                    parent_node_id=role_id,
                                    block=block,
                                    color=color,
                                    play_vars=play_vars,
                                    current_counter=role_tasks_counter,
                                    node_name_prefix="[task] ")
                                role_tasks_counter += 1
                self.display.v(
                    "{} roles added to the graph".format(role_number))

                # loop through the tasks
                self.display.v("Graphing tasks...")
                nb_tasks = 0
                for task_block in play.tasks:
                    nb_tasks = self._include_tasks_in_blocks(
                        current_play=play,
                        graph=play_subgraph,
                        parent_node_name=play_name,
                        parent_node_id=play_id,
                        block=task_block,
                        color=color,
                        current_counter=role_number + nb_pre_tasks,
                        play_vars=play_vars,
                        node_name_prefix="[task] ")

                # loop through the post_tasks
                self.display.v("Graphing post_tasks...")
                for post_task_block in play.post_tasks:
                    self._include_tasks_in_blocks(
                        current_play=play,
                        graph=play_subgraph,
                        parent_node_name=play_name,
                        parent_node_id=play_id,
                        block=post_task_block,
                        color=color,
                        current_counter=nb_tasks,
                        play_vars=play_vars,
                        node_name_prefix="[post_task] ")

            self.display.banner("Done graphing {}".format(play_name))
            self.display.display("")  # just an empty line
            # moving to the next play

    def render_graph(self):
        """
        Render the graph
        :return: The rendered file path
        :rtype: str
        """

        self.rendered_file_path = self.graph.render(
            cleanup=not self.options.save_dot_file,
            filename=self.options.output_filename)
        if self.options.save_dot_file:
            # add .dot extension. The render doesn't add an extension
            final_name = self.options.output_filename + ".dot"
            os.rename(self.options.output_filename, final_name)
            self.display.display(
                "Graphviz dot file has been exported to {}".format(final_name))

        return self.rendered_file_path

    def post_process_svg(self):
        """
        Post process the rendered svg
        :return The post processed file path
        :rtype: str
        :return:
        """
        post_processor = PostProcessor(svg_path=self.rendered_file_path)

        post_processor.post_process(
            graph_representation=self.graph_representation)

        post_processor.write()

        self.display.display("The graph has been exported to {}".format(
            self.rendered_file_path))

        return self.rendered_file_path

    def _include_tasks_in_blocks(self,
                                 current_play,
                                 graph,
                                 parent_node_name,
                                 parent_node_id,
                                 block,
                                 color,
                                 current_counter,
                                 play_vars=None,
                                 node_name_prefix=""):
        """
        Recursively read all the tasks of the block and add them to the graph
        FIXME: This function needs some refactoring. Thinking of a BlockGrapher to handle this
        :param current_play:
        :type current_play: ansible.playbook.play.Play
        :param graph:
        :type graph:
        :param parent_node_name:
        :type parent_node_name: str
        :param parent_node_id:
        :type parent_node_id: str
        :param block:
        :type block: Union[Block,TaskInclude]
        :param color:
        :type color: str
        :param current_counter:
        :type current_counter: int
        :param play_vars:
        :type play_vars: dict
        :param node_name_prefix:
        :type node_name_prefix: str
        :return:
        :rtype:
        """

        loop_counter = current_counter
        # loop through the tasks
        for counter, task_or_block in enumerate(block.block, 1):
            if isinstance(task_or_block, Block):
                loop_counter = self._include_tasks_in_blocks(
                    current_play=current_play,
                    graph=graph,
                    parent_node_name=parent_node_name,
                    parent_node_id=parent_node_id,
                    block=task_or_block,
                    color=color,
                    current_counter=loop_counter,
                    play_vars=play_vars,
                    node_name_prefix=node_name_prefix)
            elif isinstance(
                    task_or_block, TaskInclude
            ):  # include, include_tasks, include_role are dynamic
                # So we need to process it explicitly because Ansible does it during the execution of the playbook

                task_vars = self.variable_manager.get_vars(play=current_play,
                                                           task=task_or_block)

                if isinstance(task_or_block, IncludeRole):

                    self.display.v(
                        "An 'include_role' found. Including tasks from '{}'".
                        format(task_or_block.args["name"]))
                    # here we have an include_role. The class IncludeRole is a subclass of TaskInclude.
                    # We do this because the management of an include_role is different.
                    # See :func:`~ansible.playbook.included_file.IncludedFile.process_include_results` from line 155
                    my_blocks, _ = task_or_block.get_block_list(
                        play=current_play,
                        loader=self.data_loader,
                        variable_manager=self.variable_manager)
                else:
                    self.display.v(
                        "An 'include_tasks' found. Including tasks from '{}'".
                        format(task_or_block.get_name()))
                    templar = Templar(loader=self.data_loader,
                                      variables=task_vars)
                    try:
                        include_file = handle_include_path(
                            original_task=task_or_block,
                            loader=self.data_loader,
                            templar=templar)
                    except AnsibleUndefinedVariable as e:
                        # TODO: mark this task with some special shape or color
                        self.display.warning(
                            "Unable to translate the include task '{}' due to an undefined variable: {}. "
                            "Some variables are available only during the real execution."
                            .format(task_or_block.get_name(), str(e)))
                        loop_counter += 1
                        self._include_task(task_or_block, loop_counter,
                                           task_vars, graph, node_name_prefix,
                                           color, parent_node_id,
                                           parent_node_name)
                        continue

                    data = self.data_loader.load_from_file(include_file)
                    if data is None:
                        self.display.warning(
                            "file %s is empty and had no tasks to include" %
                            include_file)
                        continue
                    elif not isinstance(data, list):
                        raise AnsibleParserError(
                            "included task files must contain a list of tasks",
                            obj=data)

                    # get the blocks from the include_tasks
                    my_blocks = load_list_of_blocks(
                        data,
                        play=current_play,
                        variable_manager=self.variable_manager,
                        role=task_or_block._role,
                        loader=self.data_loader,
                        parent_block=task_or_block)

                for b in my_blocks:  # loop through the blocks inside the included tasks or role
                    loop_counter = self._include_tasks_in_blocks(
                        current_play=current_play,
                        graph=graph,
                        parent_node_name=parent_node_name,
                        parent_node_id=parent_node_id,
                        block=b,
                        color=color,
                        current_counter=loop_counter,
                        play_vars=task_vars,
                        node_name_prefix=node_name_prefix)
            else:
                # check if this task comes from a role and we don't want to include the role's tasks
                if has_role_parent(
                        task_or_block) and not self.options.include_role_tasks:
                    # skip role's task
                    self.display.vv(
                        "The task '{}' has a role as parent and include_role_tasks is false. "
                        "It will be skipped.".format(task_or_block.get_name()))
                    continue

                self._include_task(task_or_block=task_or_block,
                                   loop_counter=loop_counter + 1,
                                   play_vars=play_vars,
                                   graph=graph,
                                   node_name_prefix=node_name_prefix,
                                   color=color,
                                   parent_node_id=parent_node_id,
                                   parent_node_name=parent_node_name)

                loop_counter += 1

        return loop_counter

    def _include_task(self, task_or_block, loop_counter, play_vars, graph,
                      node_name_prefix, color, parent_node_id,
                      parent_node_name):
        """
        Include the task in the graph
        :return:
        :rtype:
        """
        self.display.vv("Adding the task '{}' to the graph".format(
            task_or_block.get_name()))
        # check if the task should be included
        tagged = ''
        if not task_or_block.evaluate_tags(only_tags=self.options.tags,
                                           skip_tags=self.options.skip_tags,
                                           all_vars=play_vars):
            self.display.vv(
                "The task '{}' should not be executed. It will be marked as NOT_TAGGED"
                .format(task_or_block.get_name()))
            tagged = NOT_TAGGED

        task_edge_label = str(loop_counter)
        if len(task_or_block.when) > 0:
            when = "".join(map(str, task_or_block.when))
            task_edge_label += "  [when: " + when + "]"

        task_name = clean_name(
            node_name_prefix +
            self.template(task_or_block.get_name(), play_vars))
        # get prefix id from node_name
        id_prefix = node_name_prefix.replace("[",
                                             "").replace("]",
                                                         "").replace(" ", "_")
        task_id = id_prefix + str(uuid.uuid4()) + tagged
        edge_id = "edge_" + str(uuid.uuid4()) + tagged

        graph.node(task_name, shape="octagon", id=task_id)
        graph.edge(parent_node_name,
                   task_name,
                   label=task_edge_label,
                   color=color,
                   fontcolor=color,
                   style="bold",
                   id=edge_id)
        self.graph_representation.add_link(parent_node_id, edge_id)
        self.graph_representation.add_link(edge_id, task_id)
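
The Grapher above drives most of its output through a single Display instance, so the amount of logging follows the verbosity it was constructed with. A minimal sketch of the same calls (assumes ansible is installed); the messages are made up:

# v/vv/vvv output only appears when the verbosity passed to Display() is high enough.
from ansible.utils.display import Display

display = Display(verbosity=2)
display.banner("Graphing playbook.yml")   # prominent section header
display.v("shown at -v and above")
display.vv("shown at -vv and above")
display.vvv("hidden here: needs -vvv")
display.warning("warnings are emitted regardless of verbosity")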
Example #9
def defined(value,
            test_value=None,
            var_type=None,
            fail_action=None,
            var_name=None,
            run_tests=False):
    """
    defined - Ansible test plugin to test if a variable is defined and not none

    Arista.avd.defined will test whether the value is defined and not None, and return true or false.
    If test_value is supplied, the value must also pass == test_value to return true.
    If var_type is supplied, the value must also be of the specified class/type.
    If fail_action is 'warning' a warning will be emitted on failure.
    If fail_action is 'error' an error will be emitted on failure and the task will fail.
    If var_name is supplied it will be used in the warning and error messages to ease troubleshooting.

    Examples:
    1. Test if var is defined and not none:
    {% if spanning_tree is arista.avd.defined %}
    ...
    {% endif %}

    2. Test if variable is defined, not none and has value "something"
    {% if extremely_long_variable_name is arista.avd.defined("something") %}
    ...
    {% endif %}

    3. Test if variable is defined and, if not, print a warning message with the variable name
    {% if my_dict.my_list[12].my_var is arista.avd.defined(fail_action='warning', var_name='my_dict.my_list[12].my_var') %}

    Parameters
    ----------
    value : any
        Value to test from ansible
    test_value : any, optional
        Value to test in addition of defined and not none, by default None
    var_type : ['float', 'int', 'str', 'list', 'dict', 'tuple', 'bool'], optional
        Type or Class to test for
    fail_action : ['warning', 'error'], optional
        Optional action if test fails to emit a Warning or Error
    var_name : <string>, optional
        Optional string to use as variable name in warning or error messages

    Returns
    -------
    boolean
        True if variable matches criteria, False in other cases.
    """
    display = Display()
    if isinstance(value, Undefined) or value is None:
        # Invalid value - return false
        if str(fail_action).lower() == 'warning':
            display._warns = {}
            if var_name is not None:
                display.warning(
                    f"{var_name} was expected but not set. Output may be incorrect or incomplete!"
                )
            else:
                display.warning(
                    "A variable was expected but not set. Output may be incorrect or incomplete!"
                )
        elif str(fail_action).lower() == 'error':
            if var_name is not None:
                raise AnsibleError(f"{var_name} was expected but not set!")
            else:
                raise AnsibleError("A variable was expected but not set!")
        if run_tests:
            return False, display._warns
        return False

    elif test_value is not None and value != test_value:
        # Valid value but not matching the optional argument
        if str(fail_action).lower() == 'warning':
            display._warns = {}
            if var_name is not None:
                display.warning(
                    f"{var_name} was set to {value} but we expected {test_value}. Output may be incorrect or incomplete!"
                )
            else:
                display.warning(
                    f"A variable was set to {value} but we expected {test_value}. Output may be incorrect or incomplete!"
                )
        elif str(fail_action).lower() == 'error':
            if var_name is not None:
                raise AnsibleError(
                    f"{var_name} was set to {value} but we expected {test_value}!"
                )
            else:
                raise AnsibleError(
                    f"A variable was set to {value} but we expected {test_value}!"
                )
        if run_tests:
            return False, display._warns
        return False
    elif str(var_type).lower() in [
            'float', 'int', 'str', 'list', 'dict', 'tuple', 'bool'
    ] and str(var_type).lower() != type(value).__name__:
        # Invalid class - return false
        if str(fail_action).lower() == 'warning':
            display._warns = {}
            if var_name is not None:
                display.warning(
                    f"{var_name} was a {type(value).__name__} but we expected a {str(var_type).lower()}. Output may be incorrect or incomplete!"
                )
            else:
                display.warning(
                    f"A variable was a {type(value).__name__} but we expected a {str(var_type).lower()}. Output may be incorrect or incomplete!"
                )
        elif str(fail_action).lower() == 'error':
            if var_name is not None:
                raise AnsibleError(
                    f"{var_name} was a {type(value).__name__} but we expected a {str(var_type).lower()}!"
                )
            else:
                raise AnsibleError(
                    f"A variable was a {type(value).__name__} but we expected a {str(var_type).lower()}!"
                )
        if run_tests:
            return False, display._warns
        return False
    else:
        # Valid value and is matching optional argument if provided - return true
        return True
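
The docstring shows the Jinja usage; calling the function directly behaves the same way. A small hedged sketch, assuming defined() from the plugin module above is in scope; the sample values are made up:

# Direct calls to the test above (illustrative values only).
print(defined("spanning-tree"))            # True: defined and not None
print(defined(None))                       # False: value is None
print(defined("mstp", test_value="rstp"))  # False: defined, but not the expected value
print(defined(42, var_type="int"))         # True: the type name matches 'int'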
Example #10
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):

    NAME = "oci"
    LIFECYCLE_ACTIVE_STATE = "ACTIVE"
    LIFECYCLE_RUNNING_STATE = "RUNNING"
    LIFECYCLE_ATTACHED_STATE = "ATTACHED"

    def __init__(self):
        super(InventoryModule, self).__init__()

        self.inventory = None
        self.config = {}
        self.compartments = None
        self._region_subscriptions = None
        self.regions = {}
        self.params = {
            "config_file": os.path.join(os.path.expanduser("~"), ".oci", "config"),
            "profile": "DEFAULT",
            "user": None,
            "fingerprint": None,
            "key_file": None,
            "tenancy": None,
            "region": None,
            "pass_phrase": None,
            # other options
            "compartment_ocid": None,
            "compartment_name": None,
            "parent_compartment_ocid": None,
            "fetch_hosts_from_subcompartments": False,
            "debug": False,
            "hostname_format": "public_ip",
            "sanitize_names": True,
            "replace_dash_in_names": False,
            "max_thread_count": 50,
            "freeform_tags": None,
            "defined_tags": None,
            "regions": None,
            "strict_hostname_checking": "no",
            "filters": None,
        }

        self.group_prefix = "oci_"
        self.display = Display()

    def _get_config_file(self):
        """
            :param config_data: contents of the inventory config file
        """
        # Preference order: .oci.yml > environment variable > settings from config file.
        if self.get_option("config_file") is not None:
            self.params["config_file"] = os.path.expanduser(
                self.get_option("config_file")
            )
        elif "OCI_CONFIG_FILE" in os.environ:
            self.params["config_file"] = os.path.expanduser(
                os.path.expandvars(os.environ.get("OCI_CONFIG_FILE"))
            )

        if self.get_option("config_profile") is not None:
            self.params["profile"] = self.get_option("config_profile")
        elif "OCI_CONFIG_PROFILE" in os.environ:
            self.params["profile"] = os.environ.get("OCI_CONFIG_PROFILE")

    def read_config(self):

        self._get_config_file()
        # Read values from config file
        if os.path.isfile(to_bytes(self.params["config_file"])):
            self.config = oci.config.from_file(
                file_location=self.params["config_file"],
                profile_name=self.params["profile"],
            )

        self.config["additional_user_agent"] = (
            oci_config_utils.inventory_agent_name + oci_common_utils.__version__
        )
        self.log(self.config["additional_user_agent"])

        for setting in self.config:
            self.params[setting] = self.config[setting]

    def read_settings_config(self, boolean_options, dict_options):
        if self.settings_config.has_section("oci"):
            for option in self.settings_config.options("oci"):
                if option in boolean_options:
                    self.params[option] = self.settings_config.getboolean("oci", option)
                elif option in dict_options:
                    self.params[option] = json.loads(
                        self.settings_config.get("oci", option)
                    )
                else:
                    self.params[option] = self.settings_config.get("oci", option)

    def log(self, *args, **kwargs):
        if self.params["debug"]:
            self.display.warning(*args, **kwargs)
        pass

    def setup_clients(self, regions):
        """
            :param regions: A list of regions for which to create clients

        """
        self.regions = regions

        self.identity_client = self.create_service_client(IdentityClient)

        self._compute_clients = dict(
            (region, self.create_service_client(ComputeClient, region=region))
            for region in self.regions
        )

        self._virtual_nw_clients = dict(
            (region, self.create_service_client(VirtualNetworkClient, region=region))
            for region in self.regions
        )

    def create_service_client(self, service_client_class, region=None):
        if not region:
            region = self.params["region"]
        params = dict(self.params, region=region)
        kwargs = {}
        if self._is_instance_principal_auth():
            kwargs["signer"] = self.create_instance_principal_signer()

        # Create service client class with the signer.
        client = service_client_class(params, **kwargs)

        return client

    def _is_instance_principal_auth(self):
        # check if auth is set to `instance_principal`.
        return self.get_option("instance_principal_authentication")

    @staticmethod
    def create_instance_principal_signer():
        try:
            signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
        except Exception as ex:
            raise Exception(
                "Failed retrieving certificates from localhost. Instance principal based authentication is only"
                "possible from within OCI compute instances. Exception: {0}".format(
                    str(ex)
                )
            )
        return signer

    @contextmanager
    def pool(self, **kwargs):
        pool = ThreadPool(**kwargs)
        try:
            yield pool
        finally:
            pool.close()
            # wait for all the instances to be processed
            pool.join()
            # terminate the pool
            pool.terminate()

    def _get_instances_by_region(self, regions):
        """
           :param regions: a list of regions in which to describe instances
           :return: A list of instance dictionaries
        """
        self.setup_clients(regions)

        self.display.warning("Building inventory.")

        # Compartments (including the root compartment) from which the instances are to be retrieved.
        self.compartments = dict(
            (compartment.id, compartment)
            for compartment in self.get_compartments(
                compartment_ocid=self.params["compartment_ocid"],
                parent_compartment_ocid=self.params["parent_compartment_ocid"],
                compartment_name=self.params["compartment_name"],
            )
        )

        if not self.compartments:
            self.display.warning("No compartments matching the criteria.")
            return

        all_instances = self.get_instances(self.compartments)

        instance_inventories = [
            self.build_inventory_for_instance(instance, region)
            for region in all_instances
            for instance in all_instances[region]
        ]

        return instance_inventories

    def get_instances(self, compartment_ocids):
        """Get and return instances from all the specified compartments and regions.

        :param compartment_ocids: List of compartment ocid's to fetch the instances from
        :return: dict with region as key and list of instances of the region as value
        """
        instances = defaultdict(list)

        if self.get_option("enable_parallel_processing"):
            for region in self.regions:
                num_threads = min(
                    len(compartment_ocids), self.params["max_thread_count"]
                )
                self.display.warning(
                    "Parallel processing enabled. Getting instances from {0} in {1} threads.".format(
                        region, num_threads
                    )
                )

                with self.pool(processes=num_threads) as pool:
                    get_filtered_instances_for_region = partial(
                        self.get_filtered_instances, region=region
                    )
                    lists_of_instances = pool.map(
                        get_filtered_instances_for_region, compartment_ocids
                    )
                for sublist in lists_of_instances:
                    instances[region].extend(sublist)

        else:
            for region in self.regions:
                for compartment_ocid in compartment_ocids:
                    instances[region].extend(
                        self.get_filtered_instances(compartment_ocid, region)
                    )

        return instances

    def get_compartments(
        self,
        compartment_ocid=None,
        parent_compartment_ocid=None,
        compartment_name=None,
        fetch_hosts_from_subcompartments=True,
    ):
        """
        Get the compartments based on the parameters passed. When compartment_name is None, all the compartments
        including the root compartment are returned.

        When compartment_name is passed, the compartment with that name and its hierarchy of compartments are returned
        if fetch_hosts_from_subcompartments is true.

        The tenancy is returned when compartment_name is the tenancy name.

        :param str compartment_ocid: (optional)
            OCID of the compartment. If None, root compartment is assumed to be parent.
        :param str parent_compartment_ocid: (optional)
            OCID of the parent compartment. If None, root compartment is assumed to be parent.
        :param str compartment_name: (optional)
            Name of the compartment. If None and :attr:`compartment_ocid` is not set, all the compartments including
            the root compartment are returned.
        :param str fetch_hosts_from_subcompartments: (optional)
            Only applicable when compartment_name is specified. When set to true, the entire hierarchy of compartments
            of the given compartment is returned.
        :raises ServiceError: When the Service returned an Error response
        :raises MaximumWaitTimeExceededError: When maximum wait time is exceeded while invoking target_fn
        :return: list of :class:`~oci.identity.models.Compartment`
        """
        if compartment_ocid:
            try:
                compartment_with_ocid = oci_utils.call_with_backoff(
                    self.identity_client.get_compartment,
                    compartment_id=compartment_ocid,
                ).data
            except ServiceError as se:
                if se.status == 404:
                    raise Exception(
                        "Compartment with OCID {0} either does not exist or "
                        "you do not have permission to access it.".format(
                            compartment_ocid
                        )
                    )
            else:
                if not fetch_hosts_from_subcompartments:
                    return [compartment_with_ocid]
                return self.get_sub_compartments(compartment_with_ocid)

        if not self.params["tenancy"]:
            raise Exception(
                "Tenancy OCID required to get the compartments in the tenancy."
            )

        try:
            tenancy = oci_utils.call_with_backoff(
                self.identity_client.get_compartment,
                compartment_id=self.params["tenancy"],
            ).data
        except ServiceError as se:
            if se.status == 404:
                raise Exception(
                    "Either tenancy ocid is invalid or need inspect permission on root compartment to get the "
                    "compartments in the tenancy."
                )

        all_compartments = [tenancy] + [
            compartment
            for compartment in oci_utils.list_all_resources(
                target_fn=self.identity_client.list_compartments,
                compartment_id=self.params["tenancy"],
                compartment_id_in_subtree=True,
            )
            if self.filter_resource(
                compartment, lifecycle_state=self.LIFECYCLE_ACTIVE_STATE
            )
        ]

        # return all the compartments if compartment_name is not passed
        if not compartment_name:
            return all_compartments

        if compartment_name == tenancy.name:
            # return all the compartments when fetch_hosts_from_subcompartments is true
            if fetch_hosts_from_subcompartments:
                return all_compartments
            else:
                return [tenancy]

        if not parent_compartment_ocid:
            parent_compartment_ocid = tenancy.id

        compartment_with_name = None
        for compartment in all_compartments:
            if (
                compartment.name == compartment_name
                and compartment.compartment_id == parent_compartment_ocid
            ):
                compartment_with_name = compartment
                break

        if not compartment_with_name:
            raise Exception(
                "Compartment with name {0} not found.".format(compartment_name)
            )

        if not fetch_hosts_from_subcompartments:
            return [compartment_with_name]

        return self.get_sub_compartments(compartment_with_name)

    @staticmethod
    def filter_resource(resource, **kwargs):
        for key, val in six.iteritems(kwargs):
            if getattr(resource, key, None) != val:
                return False
        return True

    def get_sub_compartments(self, root):
        # OCI SDK does not support fetching sub-compartments for non root compartments
        # So traverse the compartment tree to fetch all the sub compartments
        compartments = []
        queue = deque()
        queue.append(root)
        while len(queue) > 0:
            parent_compartment = queue.popleft()
            compartments.append(parent_compartment)
            child_compartments = [
                compartment
                for compartment in oci_utils.list_all_resources(
                    target_fn=self.identity_client.list_compartments,
                    compartment_id=parent_compartment.id,
                )
                if self.filter_resource(
                    compartment, lifecycle_state=self.LIFECYCLE_ACTIVE_STATE
                )
            ]
            for child_compartment in child_compartments:
                queue.append(child_compartment)
        return compartments

    def build_inventory_for_instance(self, instance, region):
        """Build and return inventory for an instance"""
        try:
            instance_inventory = {}
            compute_client = self.get_compute_client_for_region(region)
            virtual_nw_client = self.get_virtual_nw_client_for_region(region)
            compartment = self.compartments[instance.compartment_id]

            instance_vars = to_dict(instance)

            common_groups = set(["all_hosts"])
            # Group by availability domain
            ad = self.sanitize(instance.availability_domain)
            common_groups.add(ad)

            # Group by compartments
            compartment_name = self.sanitize(compartment.name)
            common_groups.add(compartment_name)

            # Group by region
            region_grp = self.sanitize("region_" + region)
            common_groups.add(region_grp)

            # Group by freeform tags tag_key=value
            for key in instance.freeform_tags:
                tag_group_name = self.sanitize(
                    "tag_" + key + "=" + instance.freeform_tags[key]
                )
                common_groups.add(tag_group_name)

            # Group by defined tags
            for namespace in instance.defined_tags:
                for key in instance.defined_tags[namespace]:
                    defined_tag_group_name = self.sanitize(
                        namespace
                        + "#"
                        + key
                        + "="
                        + instance.defined_tags[namespace][key]
                    )
                    common_groups.add(defined_tag_group_name)

            vnic_attachments = [
                vnic_attachment
                for vnic_attachment in oci_utils.list_all_resources(
                    target_fn=compute_client.list_vnic_attachments,
                    compartment_id=compartment.id,
                    instance_id=instance.id,
                )
                if self.filter_resource(
                    vnic_attachment, lifecycle_state=self.LIFECYCLE_ATTACHED_STATE
                )
            ]

            for vnic_attachment in vnic_attachments:

                vnic = oci_utils.call_with_backoff(
                    virtual_nw_client.get_vnic, vnic_id=vnic_attachment.vnic_id
                ).data
                self.log(
                    "VNIC {0} is attached to instance {1}.".format(
                        vnic.id, vnic_attachment.instance_id
                    )
                )

                subnet = oci_utils.call_with_backoff(
                    virtual_nw_client.get_subnet, subnet_id=vnic.subnet_id
                ).data

                if instance_vars.get("id") == vnic_attachment.instance_id:
                    instance_vars.update({"vcn_id": subnet.vcn_id})
                    instance_vars.update({"vnic_id": vnic.id})
                    instance_vars.update({"subnet_id": vnic.subnet_id})
                    instance_vars.update({"public_ip": vnic.public_ip})
                    instance_vars.update({"private_ip": vnic.private_ip})

                host_name = self.get_host_name(vnic, region=region)

                # Skip host which is not addressable using hostname_format
                if not host_name:
                    if self.params["strict_hostname_checking"] == "yes":
                        raise Exception(
                            "Instance with OCID: {0} does not have a valid hostname.".format(
                                vnic_attachment.instance_id
                            )
                        )
                    self.log(
                        "Skipped instance with OCID:" + vnic_attachment.instance_id
                    )
                    return None

                host_name = self.sanitize(host_name)

                groups = set(common_groups)

                self.display.warning(
                    "Creating inventory for host {0}.".format(host_name)
                )
                self.create_instance_inventory_for_host(
                    instance_inventory, host_name, vars=instance_vars, groups=groups
                )
            self.log("Final inventory for {0}.".format(str(instance_inventory)))
            return instance_inventory

        except ServiceError as ex:
            if ex.status == 401:
                self.log(ex)
                raise
            self.log(ex)

    def create_instance_inventory_for_host(
        self, instance_inventory, host_name, vars, groups
    ):
        instance_inventory.setdefault(host_name, {"groups": {}, "vars": {}})
        instance_inventory[host_name]["vars"] = vars
        for group in groups:
            instance_inventory[host_name]["groups"].setdefault(group, {"children": []})

        return instance_inventory

    def sanitize(self, word):
        # regex matches any character that is not alphanumeric and not UNDERSCORE, HASH, EQUALS or DOT
        regex = r"[^A-Za-z0-9_#=."
        if self.params["sanitize_names"]:
            if not self.params["replace_dash_in_names"]:
                # Add DASH as a valid character in regex.
                regex += r"-"

            # Replace all invalid characters with UNDERSCORE
            return re.sub(regex + "]", "_", word)
        return word
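
    # A minimal sketch of the substitution above (the input value is
    # illustrative, not from the original example):
    #   re.sub(r"[^A-Za-z0-9_#=.-]", "_", "web server-01.example")
    #     -> "web_server-01.example"   # dash kept: replace_dash_in_names is false
    #   re.sub(r"[^A-Za-z0-9_#=.]", "_", "web server-01.example")
    #     -> "web_server_01.example"   # dash replaced: replace_dash_in_names is true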

    def get_filtered_instances(self, compartment_ocid, region):
        try:
            compute_client = self.get_compute_client_for_region(region)

            instances = oci_utils.list_all_resources(
                target_fn=compute_client.list_instances,
                compartment_id=compartment_ocid,
                lifecycle_state="RUNNING",
            )
            self.log(
                "All RUNNING instances from compartment {0}:{1}".format(
                    compartment_ocid, instances
                )
            )

            # The full result set is already cached, so filter on that data
            # locally rather than asking the API to filter.
            if "display_name" in self.filters and self.filters["display_name"]:
                instances = [
                    instance
                    for instance in instances
                    if (instance.display_name == self.filters["display_name"])
                ]

            if (
                "availability_domain" in self.filters
                and self.filters["availability_domain"]
            ):
                instances = [
                    instance
                    for instance in instances
                    if (
                        instance.availability_domain
                        == self.filters["availability_domain"]
                    )
                ]

            if "lifecycle_state" in self.filters and self.filters["lifecycle_state"]:
                instances = [
                    instance
                    for instance in instances
                    if (instance.lifecycle_state == self.filters["lifecycle_state"])
                ]

            if "freeform_tags" in self.filters and self.filters["freeform_tags"]:
                instances = [
                    instance
                    for instance in instances
                    if all(
                        instance.freeform_tags.get(key) == value
                        for key, value in six.iteritems(self.filters["freeform_tags"])
                    )
                ]
                self.display.warning(
                    "Instances in compartment {0} which match all the freeform tags: {1}".format(
                        compartment_ocid, instances
                    )
                )
            if "defined_tags" in self.filters and self.filters["defined_tags"]:
                instances = [
                    instance
                    for instance in instances
                    if all(
                        (instance.defined_tags.get(namespace, {})).get(key) == value
                        for namespace in self.filters["defined_tags"]
                        for key, value in six.iteritems(
                            self.filters["defined_tags"][namespace]
                        )
                    )
                ]
                self.display.warning(
                    "Instances in compartment {0} which match all the freeform & defined tags: {1}".format(
                        compartment_ocid, instances
                    )
                )
            return instances
        except ServiceError as ex:
            if ex.status == 401:
                self.display.warning(ex)
                raise
            self.display.warning(ex)
            return []

    def get_compute_client_for_region(self, region):
        if region not in self._compute_clients:
            raise ValueError(
                "Could not fetch the compute client for region {0}.".format(region)
            )
        return self._compute_clients[region]

    def get_virtual_nw_client_for_region(self, region):
        if region not in self._virtual_nw_clients:
            raise ValueError(
                "Could not fetch the virtual network client for region {0}.".format(region)
            )
        return self._virtual_nw_clients[region]

    def get_host_name(self, vnic, region):
        virtual_nw_client = self.get_virtual_nw_client_for_region(region)
        if self.params["hostname_format"] == "fqdn":
            subnet = oci_utils.call_with_backoff(
                virtual_nw_client.get_subnet, subnet_id=vnic.subnet_id
            ).data
            vcn = oci_utils.call_with_backoff(
                virtual_nw_client.get_vcn, vcn_id=subnet.vcn_id
            ).data

            oraclevcn_domain_name = ".oraclevcn.com"
            # The FQDN needs all three labels; bail out if any one is missing.
            if not (vnic.hostname_label and subnet.dns_label and vcn.dns_label):
                return None
            fqdn = (
                vnic.hostname_label
                + "."
                + subnet.dns_label
                + "."
                + vcn.dns_label
                + oraclevcn_domain_name
            )
            self.display.warning("FQDN for VNIC: {0} is {1}.".format(vnic.id, fqdn))
            return fqdn

        elif self.params["hostname_format"] == "private_ip":
            self.display.warning(
                "Private IP for VNIC: {0} is {1}.".format(vnic.id, vnic.private_ip)
            )
            return vnic.private_ip

        return vnic.public_ip

    def _query(self, regions):
        """
            :param regions: a list of regions to query
        """
        return self._get_instances_by_region(regions)

    def _populate(self, instance_inventories, hostnames):
        for instance_inventory in instance_inventories:
            if instance_inventory:
                for host_name, host_inventory in six.iteritems(instance_inventory):
                    if not hostnames or host_name in hostnames:
                        for group in host_inventory["groups"]:
                            self.inventory.add_group(group)
                            self.inventory.add_host(host_name, group=group)
                            self.inventory.set_variable(
                                host_name,
                                host_inventory["vars"]["display_name"],
                                host_inventory["vars"],
                            )
                            self.inventory.add_child("all", host_name)
                            self._set_composite_vars(
                                self.get_option("compose"),
                                host_inventory["vars"],
                                host_name,
                            )
                            self._add_host_to_composed_groups(
                                self.get_option("groups"),
                                host_inventory["vars"],
                                host_name,
                            )
                            self._add_host_to_keyed_groups(
                                self.get_option("keyed_groups"),
                                host_inventory["vars"],
                                host_name,
                            )

    def verify_file(self, path):
        """
            :param loader: an ansible.parsing.dataloader.DataLoader object
            :param path: the path to the inventory config file
            :return the contents of the config file
        """
        if super(InventoryModule, self).verify_file(path):
            if path.endswith(".oci.yml") or path.endswith(".oci.yaml"):
                return True

    def _get_query_options(self, config_data):
        """
            :param config_data: contents of the inventory config file
            :return A list of regions to query
                    a list of possible hostnames
        """
        options = {
            "regions": {"type_to_be": list, "value": config_data.get("regions", [])},
            "compartments": {
                "type_to_be": list,
                "value": config_data.get("compartments", []),
            },
            "filters": {"type_to_be": list, "value": config_data.get("filters", [])},
            "hostnames": {
                "type_to_be": list,
                "value": config_data.get("hostnames", []),
            },
        }

        # validate the options
        for name in options:
            options[name]["value"] = self._validate_option(
                name, options[name]["type_to_be"], options[name]["value"]
            )

        filters = dict((key, d[key]) for d in options["filters"]["value"] for key in d)
        self.filters = filters

        for item in options["compartments"]["value"]:
            self.display.warning(" --- compartments item: {0}   ".format(item))
            if "compartment_ocid" in item:
                self.params["compartment_ocid"] = item["compartment_ocid"]
            if "parent_compartment_ocid" in item:
                self.params["parent_compartment_ocid"] = item["parent_compartment_ocid"]
            if "compartment_name" in item:
                self.params["compartment_name"] = item["compartment_name"]

        regions = options["regions"]["value"]
        hostnames = options["hostnames"]["value"]

        return regions, filters, hostnames
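
    # A hedged sketch of the inventory-config shape this method consumes; the
    # key names come from the lookups above, the OCIDs and values are placeholders:
    #   regions:
    #     - us-ashburn-1
    #   compartments:
    #     - compartment_ocid: ocid1.compartment.oc1..exampleocid
    #       compartment_name: example-compartment
    #   filters:
    #     - lifecycle_state: RUNNING
    #   hostnames:
    #     - web_server_01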

    def _validate_option(self, name, desired_type, option_value):
        """
            :param name: the option name
            :param desired_type: the class the option needs to be
            :param option_value: the value the user has provided
            :return the option coerced to the desired class
        """

        if isinstance(option_value, string_types) and desired_type == list:
            option_value = [option_value]

        if option_value is None:
            option_value = desired_type()

        if not isinstance(option_value, desired_type):
            raise AnsibleParserError(
                "The option %s (%s) must be a %s" % (name, option_value, desired_type)
            )

        return option_value
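
    # Illustrative calls (not from the original snippet) showing the coercion
    # performed above:
    #   self._validate_option("regions", list, "us-ashburn-1")  ->  ["us-ashburn-1"]
    #   self._validate_option("filters", list, None)            ->  []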

    def parse(self, inventory, loader, path, cache=True):
        super(InventoryModule, self).parse(inventory, loader, path)

        config_data = self._read_config_data(path)
        # read oci config
        self.read_config()

        regions, filters, hostnames = self._get_query_options(config_data)

        cache_key = self.get_cache_key(path)

        if not regions:
            regions = [self.params["region"]]

        # false when refresh_cache
        if cache:
            cache = self.get_option("cache")

        # Generate inventory
        cache_needs_update = False
        if cache:
            try:
                cached_results = self._cache[cache_key]
            except Exception:
                # cache expired or cache file doesn't exist
                cache_needs_update = True
            else:
                self.display.warning("Using cached results")
                self._populate(cached_results, hostnames)

        results = None
        if not cache or cache_needs_update:
            results = self._query(regions)
            self._populate(results, hostnames)

        # update the cached inventory
        if cache_needs_update or (not cache and self.get_option("cache")):
            self._cache[cache_key] = results
Exemple #11
0
# NOTE: the imports below are assumed from the pre-2.4 Ansible Python API that
# this example targets; they are not shown in the original snippet.
import sys
from collections import namedtuple

from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.play import Play
from ansible.utils.display import Display
from ansible.vars import VariableManager


def main(argv=sys.argv[1:]):
    display = Display()

    host_list = ['spark-node1', 'spark-node2', 'spark-node3']

    # initialize needed objects
    Options = namedtuple('Options', ['connection',
                                     'module_path',
                                     'log_path',
                                     'forks',
                                     'become',
                                     'become_method',
                                     'become_user',
                                     'verbosity',
                                     'check'])
    variable_manager = VariableManager()
    loader = DataLoader()
    options = Options(connection='ssh',
                      module_path='/path/to/mymodules',
                      log_path='./log',
                      forks=100,
                      become=None,
                      become_method=None,
                      become_user=None,
                      verbosity=None,
                      check=False)

    # create inventory and pass to var manager
    inventory = Inventory(loader=loader,
                          variable_manager=variable_manager,
                          host_list=host_list)

    variable_manager.set_inventory(inventory)

    # inventory_list = inventory.get_hosts()
    # display.display('hosts: %s' % dir(inventory_list[0]))
    # h = inventory_list[0].get_name()
    # display.display('hosts: %s' % inventory_list[0].get_name())
    # display.display('hosts: %s' % variable_manager.get_vars(loader))
    # display.display('host list: %s' % inventory_list)
    display.warning('Running Ansible embedded')

    # create play with tasks
    play_source = dict(name="Ansible Play",
                       hosts=host_list,
                       gather_facts='no',
                       tasks=[
                           dict(action=dict(module='command',
                                args='uname -a'), register='shell_out'),
                           dict(action=dict(module='debug',
                                args=dict(msg='{{shell_out.stdout}}')))
                       ]
                       )

    play = Play().load(play_source,
                       variable_manager=variable_manager,
                       loader=loader)

    callback = ResultsCollector()

    # actually run it
    tqm = None
    try:
        tqm = TaskQueueManager(inventory=inventory,
                               variable_manager=variable_manager,
                               loader=loader,
                               options=options,
                               passwords={},
                               stdout_callback=callback,
                               )

        result = tqm.run(play)
        if result != 0:
            print "ERROR"
    finally:
        if tqm is not None:
            tqm.cleanup()

    print "UP ***********"
    for host, result in callback.host_ok.items():
        print '{}: {}'.format(host, result._result['msg'])

    print "FAILED *******"
    for host, result in callback.host_failed.items():
        print '{}: {}'.format(host, result._result['msg'])

    print "DOWN *********"
    for host, result in callback.host_unreachable.items():
        print '{}: {}'.format(host, result._result['msg'])
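

# A minimal sketch of the ResultsCollector callback used above; it is not shown
# in the original snippet, so this assumes the standard Ansible 2.x CallbackBase
# interface and simply buckets each host's last result for main() to print.
from ansible.plugins.callback import CallbackBase


class ResultsCollector(CallbackBase):
    """Collect per-host results so main() can report them after the run."""

    def __init__(self, *args, **kwargs):
        super(ResultsCollector, self).__init__(*args, **kwargs)
        self.host_ok = {}
        self.host_failed = {}
        self.host_unreachable = {}

    def v2_runner_on_ok(self, result):
        # Successful task result for a host.
        self.host_ok[result._host.get_name()] = result

    def v2_runner_on_failed(self, result, ignore_errors=False):
        # Failed task result for a host.
        self.host_failed[result._host.get_name()] = result

    def v2_runner_on_unreachable(self, result):
        # Host could not be reached at all.
        self.host_unreachable[result._host.get_name()] = result

# Keeping results keyed by host name means later results overwrite earlier
# ones; that is sufficient here because main() only prints a summary per host.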