Example #1
    def _add_host_to_keyed_groups(self, keys, variables, host, strict=False):
        ''' helper to create groups for plugins based on variable values and add the corresponding hosts to it'''
        if keys and isinstance(keys, list):
            for keyed in keys:
                if keyed and isinstance(keyed, dict):

                    variables = combine_vars(
                        variables,
                        self.inventory.get_host(host).get_vars())
                    try:
                        key = self._compose(keyed.get('key'), variables)
                    except Exception as e:
                        if strict:
                            raise AnsibleParserError(
                                "Could not generate group for host %s from %s entry: %s"
                                % (host, keyed.get('key'), to_native(e)))
                        continue

                    if key:
                        prefix = keyed.get('prefix', '')
                        sep = keyed.get('separator', '_')
                        raw_parent_name = keyed.get('parent_group', None)
                        if raw_parent_name:
                            try:
                                raw_parent_name = self.templar.template(
                                    raw_parent_name)
                            except AnsibleError as e:
                                if strict:
                                    raise AnsibleParserError(
                                        "Could not generate parent group %s for group %s: %s"
                                        % (raw_parent_name, key, to_native(e)))
                                continue

                        new_raw_group_names = []
                        if isinstance(key, string_types):
                            new_raw_group_names.append(key)
                        elif isinstance(key, list):
                            for name in key:
                                new_raw_group_names.append(name)
                        elif isinstance(key, Mapping):
                            for (gname, gval) in key.items():
                                name = '%s%s%s' % (gname, sep, gval)
                                new_raw_group_names.append(name)
                        else:
                            raise AnsibleParserError(
                                "Invalid group name format, expected a string or a list of them or dictionary, got: %s"
                                % type(key))

                        for bare_name in new_raw_group_names:
                            if prefix == '' and self.get_option(
                                    'leading_separator') is False:
                                sep = ''
                            gname = self._sanitize_group_name(
                                '%s%s%s' % (prefix, sep, bare_name))
                            result_gname = self.inventory.add_group(gname)
                            self.inventory.add_host(host, result_gname)

                            if raw_parent_name:
                                parent_name = self._sanitize_group_name(
                                    raw_parent_name)
                                self.inventory.add_group(parent_name)
                                self.inventory.add_child(
                                    parent_name, result_gname)

                    else:
                        # exclude case of empty list and dictionary, because these are valid constructions
                        # simply no groups need to be constructed, but are still falsy
                        if strict and key not in ([], {}):
                            raise AnsibleParserError(
                                "No key or key resulted empty for %s in host %s, invalid entry"
                                % (keyed.get('key'), host))
                else:
                    raise AnsibleParserError(
                        "Invalid keyed group entry, it must be a dictionary: %s "
                        % keyed)
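
To make the naming rules above concrete, here is a minimal standalone sketch (not Ansible's actual implementation; it ignores templating and _sanitize_group_name) of how a templated key plus the prefix/separator/leading_separator settings become group names:

def keyed_group_names(key, prefix='', sep='_', leading_separator=True):
    # normalize the templated key into a list of bare names
    if isinstance(key, str):
        bare = [key]
    elif isinstance(key, list):
        bare = list(key)
    elif isinstance(key, dict):
        bare = ['%s%s%s' % (k, sep, v) for k, v in key.items()]
    else:
        raise TypeError('expected str, list or dict, got %s' % type(key))
    # an empty prefix drops the leading separator when leading_separator is off
    lead = '' if prefix == '' and not leading_separator else sep
    return ['%s%s%s' % (prefix, lead, name) for name in bare]

print(keyed_group_names('centos', prefix='os'))    # ['os_centos']
print(keyed_group_names({'arch': 'x86_64'}))       # ['_arch_x86_64']
print(keyed_group_names({'arch': 'x86_64'}, leading_separator=False))  # ['arch_x86_64']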
Example #2
def load_list_of_tasks(ds,
                       play,
                       block=None,
                       role=None,
                       task_include=None,
                       use_handlers=False,
                       variable_manager=None,
                       loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.handler_task_include import HandlerTaskInclude
    from ansible.template import Templar

    assert isinstance(ds, list)

    task_list = []
    for task_ds in ds:
        assert isinstance(task_ds, dict)

        if 'block' in task_ds:
            t = Block.load(
                task_ds,
                play=play,
                parent_block=block,
                role=role,
                task_include=None,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
            task_list.append(t)
        else:
            if 'include' in task_ds:
                if use_handlers:
                    t = HandlerTaskInclude.load(
                        task_ds,
                        block=block,
                        role=role,
                        task_include=task_include,
                        variable_manager=variable_manager,
                        loader=loader)
                else:
                    t = TaskInclude.load(task_ds,
                                         block=block,
                                         role=role,
                                         task_include=task_include,
                                         variable_manager=variable_manager,
                                         loader=loader)
                all_vars = variable_manager.get_vars(loader=loader,
                                                     play=play,
                                                     task=t)
                templar = Templar(loader=loader, variables=all_vars)

                # check to see if this include is dynamic or static:
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                if t.static is not None:
                    is_static = t.static
                else:
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                                (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                                (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop)

                if is_static:
                    if t.loop is not None:
                        raise AnsibleParserError(
                            "You cannot use 'static' on an include with a loop",
                            obj=task_ds)

                    # we set a flag to indicate this include was static
                    t.statically_loaded = True

                    # handle relative includes by walking up the list of parent include
                    # tasks and checking the relative result to see if it exists
                    parent_include = task_include
                    cumulative_path = None

                    found = False
                    while parent_include is not None:
                        parent_include_dir = templar.template(
                            os.path.dirname(
                                parent_include.args.get('_raw_params')))
                        if cumulative_path is None:
                            cumulative_path = parent_include_dir
                        elif not os.path.isabs(cumulative_path):
                            cumulative_path = os.path.join(
                                parent_include_dir, cumulative_path)
                        include_target = templar.template(
                            t.args['_raw_params'])
                        if t._role:
                            new_basedir = os.path.join(t._role._role_path,
                                                       'tasks',
                                                       cumulative_path)
                            include_file = loader.path_dwim_relative(
                                new_basedir, 'tasks', include_target)
                        else:
                            include_file = loader.path_dwim_relative(
                                loader.get_basedir(), cumulative_path,
                                include_target)

                        if os.path.exists(include_file):
                            found = True
                            break
                        else:
                            parent_include = parent_include._task_include

                    if not found:
                        try:
                            include_target = templar.template(
                                t.args['_raw_params'])
                        except AnsibleUndefinedVariable as e:
                            raise AnsibleParserError(
                                      "Error when evaluating variable in include name: %s.\n\n" \
                                      "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n" \
                                      "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n" \
                                      "sources like group or host vars." % t.args['_raw_params'],
                                      obj=task_ds,
                                      suppress_extended_error=True,
                                  )
                        if t._role:
                            if use_handlers:
                                include_file = loader.path_dwim_relative(
                                    t._role._role_path, 'handlers',
                                    include_target)
                            else:
                                include_file = loader.path_dwim_relative(
                                    t._role._role_path, 'tasks',
                                    include_target)
                        else:
                            include_file = loader.path_dwim(include_target)

                    try:
                        data = loader.load_from_file(include_file)
                        if data is None:
                            return []
                        elif not isinstance(data, list):
                            raise AnsibleError(
                                "included task files must contain a list of tasks",
                                obj=data)

                        # since we can't send callbacks here, we display a message directly in
                        # the same fashion used by the on_include callback. We also do it here,
                        # because the recursive nature of helper methods means we may be loading
                        # nested includes, and we want the include order printed correctly
                        display.display("statically included: %s" %
                                        include_file,
                                        color=C.COLOR_SKIP)
                    except AnsibleFileNotFound as e:
                        if t.static or \
                           C.DEFAULT_TASK_INCLUDES_STATIC or \
                           C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers:
                            raise
                        display.deprecated(
                            "Included file '%s' not found, however since this include is not " \
                            "explicitly marked as 'static: yes', we will try and include it dynamically " \
                            "later. In the future, this will be an error unless 'static: no' is used " \
                            "on the include task. If you do not want missing includes to be considered " \
                            "dynamic, use 'static: yes' on the include or set the global ansible.cfg " \
                            "options to make all inclues static for tasks and/or handlers" % include_file,
                        )
                        task_list.append(t)
                        continue

                    included_blocks = load_list_of_blocks(
                        data,
                        play=play,
                        parent_block=block,
                        task_include=t,
                        role=role,
                        use_handlers=use_handlers,
                        loader=loader,
                        variable_manager=variable_manager,
                    )

                    # pop tags out of the include args, if they were specified there, and assign
                    # them to the include. If the include already had tags specified, we raise an
                    # error so that users know not to specify them both ways
                    tags = t.vars.pop('tags', [])
                    if isinstance(tags, string_types):
                        tags = tags.split(',')

                    if len(tags) > 0:
                        if len(t.tags) > 0:
                            raise AnsibleParserError(
                                "Include tasks should not specify tags in more than one way (both via args and directly on the task)." \
                                " Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
                                obj=task_ds,
                                suppress_extended_error=True,
                            )
                        display.deprecated(
                            "You should not specify tags in the include parameters. All tags should be specified using the task-level option"
                        )
                    else:
                        tags = t.tags[:]

                    # now we extend the tags on each of the included blocks
                    for b in included_blocks:
                        b.tags = list(set(b.tags).union(tags))
                    # END FIXME

                    # FIXME: handlers shouldn't need this special handling, but do
                    #        right now because they don't iterate blocks correctly
                    if use_handlers:
                        for b in included_blocks:
                            task_list.extend(b.block)
                    else:
                        task_list.extend(included_blocks)
                else:
                    task_list.append(t)
            else:
                if use_handlers:
                    t = Handler.load(task_ds,
                                     block=block,
                                     role=role,
                                     task_include=task_include,
                                     variable_manager=variable_manager,
                                     loader=loader)
                else:
                    t = Task.load(task_ds,
                                  block=block,
                                  role=role,
                                  task_include=task_include,
                                  variable_manager=variable_manager,
                                  loader=loader)
                task_list.append(t)

    return task_list
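
The tag handling near the end of the static branch unions include-level tags onto every included block. A toy illustration of that set-union step (note that the pass through set() loses ordering):

block_tags = ['db', 'deploy']
include_tags = 'deploy,web'.split(',')   # string tags are split on commas
merged = list(set(block_tags).union(include_tags))
print(sorted(merged))                    # ['db', 'deploy', 'web']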
Example #3
    def preprocess_data(self, ds):
        '''
        tasks are especially complex arguments so need pre-processing.
        keep it short.
        '''

        assert isinstance(
            ds, dict), 'ds (%s) should be a dict but was a %s' % (ds, type(ds))

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure suitable for the
        # attributes of the task class
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # use the args parsing class to determine the action, args,
        # and the delegate_to value from the various possible forms
        # supported as legacy
        args_parser = ModuleArgsParser(task_ds=ds)
        try:
            (action, args, delegate_to) = args_parser.parse()
        except AnsibleParserError as e:
            raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)

        # the command/shell/script modules used to support the `cmd` arg,
        # which corresponds to what we now call _raw_params, so move that
        # value over to _raw_params (assuming it is empty)
        if action in ('command', 'shell', 'script'):
            if 'cmd' in args:
                if args.get('_raw_params', '') != '':
                    raise AnsibleError(
                        "The 'cmd' argument cannot be used when other raw parameters are specified."
                        " Please put everything in one or the other place.",
                        obj=ds)
                args['_raw_params'] = args.pop('cmd')

        new_ds['action'] = action
        new_ds['args'] = args
        new_ds['delegate_to'] = delegate_to

        # we handle any 'vars' specified in the ds here, as we may
        # be adding things to them below (special handling for includes).
        # When that deprecated feature is removed, this can be too.
        if 'vars' in ds:
            # _load_vars is defined in Base, and is used to load a dictionary
            # or list of dictionaries in a standard way
            new_ds['vars'] = self._load_vars(None, ds.get('vars'))
        else:
            new_ds['vars'] = dict()

        for (k, v) in iteritems(ds):
            if k in ('action', 'local_action', 'args',
                     'delegate_to') or k == action or k == 'shell':
                # we don't want to re-assign these values, which were determined by the ModuleArgsParser() above
                continue
            elif k.replace("with_", "") in lookup_loader:
                # transform into loop property
                self._preprocess_loop(ds, new_ds, k, v)
            else:
                # pre-2.0 syntax allowed variables for include statements at the top level of the task,
                # so we move those into the 'vars' dictionary here, and show a deprecation message
                # as we will remove this at some point in the future.
                if action in (
                        'include', 'include_tasks'
                ) and k not in self._valid_attrs and k not in self.DEPRECATED_ATTRIBUTES:
                    display.deprecated(
                        "Specifying include variables at the top-level of the task is deprecated."
                        " Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\n"
                        " for currently supported syntax regarding included files and variables",
                        version="2.7")
                    new_ds['vars'][k] = v
                elif k in self._valid_attrs:
                    new_ds[k] = v
                else:
                    display.warning("Ignoring invalid attribute: %s" % k)

        return super(Task, self).preprocess_data(new_ds)
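
A minimal sketch of the cmd-to-_raw_params move performed above, with a plain ValueError standing in for AnsibleError:

args = {'cmd': 'ls /tmp'}
if 'cmd' in args:
    if args.get('_raw_params', '') != '':
        raise ValueError("'cmd' cannot be combined with other raw parameters")
    args['_raw_params'] = args.pop('cmd')
print(args)   # {'_raw_params': 'ls /tmp'}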
Example #4
    def _load_included_file(self, included_file, iterator, is_handler=False):
        '''
        Loads an included YAML file of tasks, applying the optional set of variables.
        '''

        display.debug("loading included file: %s" % included_file._filename)
        try:
            data = self._loader.load_from_file(included_file._filename)
            if data is None:
                return []
            elif not isinstance(data, list):
                raise AnsibleError(
                    "included task files must contain a list of tasks")

            ti_copy = included_file._task.copy()
            temp_vars = ti_copy.vars.copy()
            temp_vars.update(included_file._args)
            # pop tags out of the include args, if they were specified there, and assign
            # them to the include. If the include already had tags specified, we raise an
            # error so that users know not to specify them both ways
            tags = included_file._task.vars.pop('tags', [])
            if isinstance(tags, string_types):
                tags = tags.split(',')
            if len(tags) > 0:
                if len(included_file._task.tags) > 0:
                    raise AnsibleParserError(
                        "Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                        "Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
                        obj=included_file._task._ds)
                display.deprecated(
                    "You should not specify tags in the include parameters. All tags should be specified using the task-level option"
                )
                included_file._task.tags = tags

            ti_copy.vars = temp_vars

            block_list = load_list_of_blocks(
                data,
                play=iterator._play,
                parent_block=None,
                task_include=ti_copy,
                role=included_file._task._role,
                use_handlers=is_handler,
                loader=self._loader,
                variable_manager=self._variable_manager,
            )

            # since we skip incrementing the stats when the task result is
            # first processed, we do so now for each host in the list
            for host in included_file._hosts:
                self._tqm._stats.increment('ok', host.name)

        except AnsibleError as e:
            # mark all of the hosts including this file as failed, send callbacks,
            # and increment the stats for this host
            for host in included_file._hosts:
                tr = TaskResult(host=host,
                                task=included_file._task,
                                return_data=dict(failed=True,
                                                 reason=to_text(e)))
                iterator.mark_host_failed(host)
                self._tqm._failed_hosts[host.name] = True
                self._tqm._stats.increment('failures', host.name)
                self._tqm.send_callback('v2_runner_on_failed', tr)
            return []

        # finally, send the callback and return the list of blocks loaded
        self._tqm.send_callback('v2_playbook_on_include', included_file)
        display.debug("done processing included file")
        return block_list
Example #5
 def set_subnets(self):
     self.subnets = []
     if 'cidr' not in self.stage:
         raise AnsibleParserError('cidr must be specified')
     mother_name = f'{self.stage_prefix}mother.{self.name}'
     self.inventory.add_host(mother_name, group='mother')
     self.inventory.add_host(mother_name, group=self.stage_name)
     self.inventory.set_variable(mother_name, 'hive_cidr',
                                 self.stage['cidr'])
     if 'prepared_resource_group' in self.stage:
         self.inventory.set_variable(mother_name,
                                     'hive_prepared_resource_group',
                                     self.stage['prepared_resource_group'])
     if 'not_support_az' in self.stage:
         self.inventory.set_variable(mother_name, 'hive_not_support_az',
                                     self.stage['not_support_az'])
     if 'region' in self.stage:
         self.inventory.set_variable(mother_name, 'hive_region',
                                     self.stage['region'])
     if 'bridge' in self.stage:
         self.inventory.set_variable(mother_name, 'hive_bridge',
                                     self.stage['bridge'])
     if 'dev' in self.stage:
         self.inventory.set_variable(mother_name, 'hive_dev',
                                     self.stage['dev'])
     if 'kickstart_config' in self.stage:
         self.inventory.set_variable(mother_name, 'hive_kickstart_config',
                                     self.stage['kickstart_config'])
     if 'subnets' not in self.stage:
         try:
             net = ipaddress.ip_network(self.stage['cidr'])
         except ValueError as e:
             raise AnsibleParserError(str(e))
         default_subnet = {
             'cidr': self.stage['cidr'],
             'name': self.name + '-default'
         }
         self.inventory.set_variable(mother_name, 'hive_subnets',
                                     [default_subnet.copy()])
         default_subnet['netmask'] = str(net.netmask)
         if 'ip_address_list' in self.stage:
             default_subnet['ip_list'] = (
                 y for y in self.stage.get('ip_address_list'))
         else:
             hosts = net.hosts()
             # first one is the router, so skip it
             next(hosts)
             # Azure uses 2 more addresses: https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-faq
             next(hosts)
             next(hosts)
             default_subnet['ip_list'] = map(str, hosts)
         self.subnets.append(default_subnet)
     else:
         var_subnets = []
         for idx, s in enumerate(self.stage['subnets']):
             subnet = s.copy()
             var_subnets.append(s)
             if 'name' not in subnet:
                 subnet['name'] = f'{self.stage_prefix}subnet{idx}'
             if 'cidr' not in subnet:
                 raise AnsibleParserError(
                     'cidr in subnet must be specified')
             try:
                 net = ipaddress.ip_network(subnet['cidr'])
             except ValueError as e:
                 raise AnsibleParserError(str(e))
             hosts = net.hosts()
              # first one is the router, so skip it
             next(hosts)
              # AWS uses 3 more addresses: https://aws.amazon.com/jp/vpc/faqs/
             next(hosts)
             next(hosts)
             next(hosts)
             subnet['ip_list'] = map(str, hosts)
             subnet['netmask'] = str(net.netmask)
             self.subnets.append(subnet)
         self.inventory.set_variable(mother_name, 'hive_subnets',
                                     var_subnets)
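
The repeated next(hosts) calls above skip addresses that are reserved at the start of the range. A stdlib-only demonstration of the same pattern:

import ipaddress

net = ipaddress.ip_network('10.0.0.0/24')
hosts = net.hosts()        # generator over usable host addresses
next(hosts)                # skip the router address
next(hosts)
next(hosts)                # cloud providers reserve a couple more
print(next(hosts))         # 10.0.0.4
print(str(net.netmask))    # 255.255.255.0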
Example #6
    def parse(self, inventory, loader, path, cache=True):

        if not HAS_GOOGLE_LIBRARIES:
            raise AnsibleParserError("gce inventory plugin cannot start: %s" %
                                     missing_required_lib("google-auth"))

        super(InventoryModule, self).parse(inventory, loader, path)

        config_data = {}
        config_data = self._read_config_data(path)

        if self.get_option("use_contrib_script_compatible_sanitization"):
            self._sanitize_group_name = (
                self._legacy_script_compatible_group_sanitization)

        # setup parameters as expected by 'fake module class' to reuse module_utils w/o changing the API
        params = {
            "filters": self.get_option("filters"),
            "projects": self.get_option("projects"),
            "scopes": self.get_option("scopes"),
            "zones": self.get_option("zones"),
            "auth_kind": self.get_option("auth_kind"),
            "service_account_file": self.get_option("service_account_file"),
            "service_account_contents":
            self.get_option("service_account_contents"),
            "service_account_email": self.get_option("service_account_email"),
        }

        self.fake_module = GcpMockModule(params)
        self.auth_session = GcpSession(self.fake_module, "compute")

        query = self._get_query_options(params["filters"])

        if self.get_option("retrieve_image_info"):
            project_disks = self._get_project_disks(config_data, query)
        else:
            project_disks = None

        # Cache logic
        if cache:
            cache = self.get_option("cache")
            cache_key = self.get_cache_key(path)
        else:
            cache_key = None

        cache_needs_update = False
        if cache:
            try:
                results = self._cache[cache_key]
                for project in results:
                    for zone in results[project]:
                        self._add_hosts(
                            results[project][zone],
                            config_data,
                            False,
                            project_disks=project_disks,
                        )
            except KeyError:
                cache_needs_update = True

        if not cache or cache_needs_update:
            cached_data = {}
            for project in params["projects"]:
                cached_data[project] = {}
                params["project"] = project
                zones = params["zones"]
                # Fetch all instances
                link = self._instances % project
                resp = self.fetch_list(params, link, query)
                for key, value in resp.items():
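                    # key is of the form 'zones/<zone>'; slice off the 'zones/' prefix (see Example #16)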
                    zone = key[6:]
                    if not zones or zone in zones:
                        self._add_hosts(value,
                                        config_data,
                                        project_disks=project_disks)
                        cached_data[project][zone] = value

        if cache_needs_update:
            self._cache[cache_key] = cached_data
Example #7
def split_args(args):
    '''
    Splits args on whitespace, but intelligently reassembles
    those that may have been split over a jinja2 block or quotes.

    When used in a remote module, we won't ever have to be concerned about
    jinja2 blocks, however this function is/will be used in the
    core portions as well before the args are templated.

    example input: a=b c="foo bar"
    example output: ['a=b', 'c="foo bar"']

    Basically this is a variation shlex that has some more intelligence for
    how Ansible needs to use it.
    '''

    # the list of params parsed out of the arg string
    # this is going to be the result value when we are done
    params = []

    # Initial split on newlines
    items = args.split('\n')

    # iterate over the tokens, and reassemble any that may have been
    # split on a space inside a jinja2 block.
    # ex if tokens are "{{", "foo", "}}" these go together

    # These variables are used
    # to keep track of the state of the parsing, since blocks and quotes
    # may be nested within each other.

    quote_char = None
    inside_quotes = False
    print_depth = 0  # used to count nested jinja2 {{ }} blocks
    block_depth = 0  # used to count nested jinja2 {% %} blocks
    comment_depth = 0  # used to count nested jinja2 {# #} blocks

    # now we loop over each split chunk, coalescing tokens if the white space
    # split occurred within quotes or a jinja2 block of some kind
    for (itemidx, item) in enumerate(items):

        # we split on spaces and newlines separately, so that we
        # can tell which character we split on for reassembly
        # inside quotation characters
        tokens = item.split(' ')

        line_continuation = False
        for (idx, token) in enumerate(tokens):

            # Empty entries means we have subsequent spaces
            # We want to hold onto them so we can reconstruct them later
            if len(token) == 0 and idx != 0:
                params[-1] += ' '
                continue

            # if we hit a line continuation character, but
            # we're not inside quotes, ignore it and continue
            # on to the next token while setting a flag
            if token == '\\' and not inside_quotes:
                line_continuation = True
                continue

            # store the previous quoting state for checking later
            was_inside_quotes = inside_quotes
            quote_char = _get_quote_state(token, quote_char)
            inside_quotes = quote_char is not None

            # multiple conditions may append a token to the list of params,
            # so we keep track with this flag to make sure it only happens once
            # append means add to the end of the list, don't append means concatenate
            # it to the end of the last token
            appended = False

            # if we're inside quotes now, but weren't before, append the token
            # to the end of the list, since we'll tack on more to it later
            # otherwise, if we're inside any jinja2 block, inside quotes, or we were
            # inside quotes (but aren't now) concat this token to the last param
            if inside_quotes and not was_inside_quotes and not (
                    print_depth or block_depth or comment_depth):
                params.append(token)
                appended = True
            elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
                if idx == 0 and was_inside_quotes:
                    params[-1] = "%s%s" % (params[-1], token)
                elif len(tokens) > 1:
                    spacer = ''
                    if idx > 0:
                        spacer = ' '
                    params[-1] = "%s%s%s" % (params[-1], spacer, token)
                else:
                    params[-1] = "%s\n%s" % (params[-1], token)
                appended = True

            # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
            # and may append the current token to the params (if we haven't previously done so)
            prev_print_depth = print_depth
            print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
            if print_depth != prev_print_depth and not appended:
                params.append(token)
                appended = True

            prev_block_depth = block_depth
            block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
            if block_depth != prev_block_depth and not appended:
                params.append(token)
                appended = True

            prev_comment_depth = comment_depth
            comment_depth = _count_jinja2_blocks(token, comment_depth, "{#",
                                                 "#}")
            if comment_depth != prev_comment_depth and not appended:
                params.append(token)
                appended = True

            # finally, if we're at zero depth for all blocks and not inside quotes, and have not
            # yet appended anything to the list of params, we do so now
            if not (print_depth or block_depth or comment_depth
                    ) and not inside_quotes and not appended and token != '':
                params.append(token)

        # if this was the last token in the list, and we have more than
        # one item (meaning we split on newlines), add a newline back here
        # to preserve the original structure
        if len(items
               ) > 1 and itemidx != len(items) - 1 and not line_continuation:
            params[-1] += '\n'

        # always clear the line continuation flag
        line_continuation = False

    # If we're done and things are not at zero depth or we're still inside quotes,
    # raise an error to indicate that the args were unbalanced
    if print_depth or block_depth or comment_depth or inside_quotes:
        raise AnsibleParserError(
            u"failed at splitting arguments, either an unbalanced jinja2 block or quotes: {0}"
            .format(args))

    return params
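
Hedged usage examples for split_args, matching its docstring (and assuming the module-level helpers it relies on, _get_quote_state and _count_jinja2_blocks, are importable):

split_args('a=b c="foo bar"')         # ['a=b', 'c="foo bar"']
split_args('msg={{ some var }} x=1')  # ['msg={{ some var }}', 'x=1']
split_args('bad="unbalanced')         # raises AnsibleParserError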
Example #8
    def _load_role_data(self, role_include, parent_role=None):
        self._role_name = role_include.role
        self._role_path = role_include.get_role_path()
        self._role_params = role_include.get_role_params()
        self._variable_manager = role_include.get_variable_manager()
        self._loader = role_include.get_loader()

        if parent_role:
            self.add_parent(parent_role)

        # copy over all field attributes, except for when and tags, which
        # are special cases and need to preserve pre-existing values
        for (attr_name, _) in iteritems(self._valid_attrs):
            if attr_name not in ('when', 'tags'):
                setattr(self, attr_name, getattr(role_include, attr_name))

        current_when = getattr(self, 'when')[:]
        current_when.extend(role_include.when)
        setattr(self, 'when', current_when)

        current_tags = getattr(self, 'tags')[:]
        current_tags.extend(role_include.tags)
        setattr(self, 'tags', current_tags)

        # dynamically load any plugins from the role directory
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join(self._role_path, obj.subdir)
                if os.path.isdir(plugin_path):
                    obj.add_directory(plugin_path)

        # load the role's other files, if they exist
        metadata = self._load_role_yaml('meta')
        if metadata:
            self._metadata = RoleMetadata.load(
                metadata,
                owner=self,
                variable_manager=self._variable_manager,
                loader=self._loader)
            self._dependencies = self._load_dependencies()
        else:
            self._metadata = RoleMetadata()

        task_data = self._load_role_yaml('tasks',
                                         main=self._from_files.get('tasks'))
        if task_data:
            try:
                self._task_blocks = load_list_of_blocks(
                    task_data,
                    play=self._play,
                    role=self,
                    loader=self._loader,
                    variable_manager=self._variable_manager)
            except AssertionError:
                raise AnsibleParserError(
                    "The tasks/main.yml file for role '%s' must contain a list of tasks"
                    % self._role_name,
                    obj=task_data)

        handler_data = self._load_role_yaml('handlers')
        if handler_data:
            try:
                self._handler_blocks = load_list_of_blocks(
                    handler_data,
                    play=self._play,
                    role=self,
                    use_handlers=True,
                    loader=self._loader,
                    variable_manager=self._variable_manager)
            except AssertionError:
                raise AnsibleParserError(
                    "The handlers/main.yml file for role '%s' must contain a list of tasks"
                    % self._role_name,
                    obj=handler_data)

        # vars and default vars are regular dictionaries
        self._role_vars = self._load_role_yaml(
            'vars', main=self._from_files.get('vars'))
        if self._role_vars is None:
            self._role_vars = dict()
        elif not isinstance(self._role_vars, dict):
            raise AnsibleParserError(
                "The vars/main.yml file for role '%s' must contain a dictionary of variables"
                % self._role_name)

        self._default_vars = self._load_role_yaml(
            'defaults', main=self._from_files.get('defaults'))
        if self._default_vars is None:
            self._default_vars = dict()
        elif not isinstance(self._default_vars, dict):
            raise AnsibleParserError(
                "The defaults/main.yml file for role '%s' must contain a dictionary of variables"
                % self._role_name)
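
For reference, this is the conventional role layout the _load_role_yaml calls above read from (the main= arguments appear to let callers substitute alternate entry-point files):

roles/<role_name>/
    meta/main.yml      -> RoleMetadata and dependencies
    tasks/main.yml     -> must be a list of tasks
    handlers/main.yml  -> must be a list of tasks
    vars/main.yml      -> must be a dictionary
    defaults/main.yml  -> must be a dictionary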
Example #9
    def _load_included_file(self, included_file, iterator, is_handler=False):
        '''
        Loads an included YAML file of tasks, applying the optional set of variables.
        '''

        display.debug("loading included file: %s" % included_file._filename)
        try:
            data = self._loader.load_from_file(included_file._filename)
            if data is None:
                return []
            elif not isinstance(data, list):
                raise AnsibleError(
                    "included task files must contain a list of tasks")

            block_list = load_list_of_blocks(
                data,
                play=included_file._task._block._play,
                parent_block=included_file._task._block,
                task_include=included_file._task,
                role=included_file._task._role,
                use_handlers=is_handler,
                loader=self._loader)

            # since we skip incrementing the stats when the task result is
            # first processed, we do so now for each host in the list
            for host in included_file._hosts:
                self._tqm._stats.increment('ok', host.name)

        except AnsibleError as e:
            # mark all of the hosts including this file as failed, send callbacks,
            # and increment the stats for this host
            for host in included_file._hosts:
                tr = TaskResult(host=host,
                                task=included_file._task,
                                return_data=dict(failed=True, reason=str(e)))
                iterator.mark_host_failed(host)
                self._tqm._failed_hosts[host.name] = True
                self._tqm._stats.increment('failures', host.name)
                self._tqm.send_callback('v2_runner_on_failed', tr)
            return []

        # set the vars for this task from those specified as params to the include
        for b in block_list:
            # first make a copy of the including task, so that each has a unique copy to modify
            # FIXME: not sure if this is the best way to fix this, as we might be losing
            #        information in the copy. Previously we assigned the include params to
            #        the block variables directly, which caused other problems, so we may
            #        need to figure out a third option if this also presents problems.
            b._task_include = b._task_include.copy(exclude_block=True)
            # then we create a temporary set of vars to ensure the variable reference is unique
            temp_vars = b._task_include.vars.copy()
            temp_vars.update(included_file._args.copy())
            # pop tags out of the include args, if they were specified there, and assign
            # them to the include. If the include already had tags specified, we raise an
            # error so that users know not to specify them both ways
            tags = temp_vars.pop('tags', [])
            if isinstance(tags, string_types):
                tags = [tags]
            if len(tags) > 0:
                if len(b._task_include.tags) > 0:
                    raise AnsibleParserError(
                        "Include tasks should not specify tags in more than one way (both via args and directly on the task)",
                        obj=included_file._task._ds)
                display.deprecated(
                    "You should not specify tags in the include parameters. All tags should be specified using the task-level option"
                )
                b._task_include.tags = tags
            b._task_include.vars = temp_vars

        # finally, send the callback and return the list of blocks loaded
        self._tqm.send_callback('v2_playbook_on_include', included_file)
        display.debug("done processing included file")
        return block_list
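
Note that this older variant normalizes a string tag by wrapping it in a list (tags = [tags]), whereas Example #4 splits on commas:

tags = 'web,db'
print([tags])            # ['web,db']    -- this example
print(tags.split(','))   # ['web', 'db'] -- Example #4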
Example #10
 def _validate_always(self, attr, name, value):
     if value and not self.block:
         raise AnsibleParserError(
             "'%s' keyword cannot be used without 'block'" % name,
             obj=self._ds)
Example #11
def handle_error(**kwargs):
    raise AnsibleParserError(to_native(kwargs.get('msg')))
Example #12
 def _get_file_contents(self, path):
     path = to_text(path)
     if path in self._file_mapping:
         return (to_bytes(self._file_mapping[path]), False)
     else:
         raise AnsibleParserError("file not found: %s" % path)
Example #13
    def parse(self, inventory, loader, path, cache=True):

        super(InventoryModule, self).parse(inventory, loader, path)

        cache_key = self._get_cache_prefix(path)

        # file is config file
        self._config_data = self._read_config_data(path)

        msg = ''
        if not self._config_data:
            msg = 'File empty. this is not my config file'
        elif 'plugin' in self._config_data and self._config_data[
                'plugin'] != self.NAME:
            msg = 'plugin config file, but not for us: %s' % self._config_data[
                'plugin']
        elif 'plugin' not in self._config_data and 'clouds' not in self._config_data:
            msg = "it's not a plugin configuration nor a clouds.yaml file"
        elif not HAS_SDK:
            msg = "openstacksdk is required for the OpenStack inventory plugin. OpenStack inventory sources will be skipped."

        if msg:
            raise AnsibleParserError(msg)

        # The user has pointed us at a clouds.yaml file. Use defaults for
        # everything.
        if 'clouds' in self._config_data:
            self._config_data = {}

        if cache:
            cache = self.get_option('cache')
        source_data = None
        if cache:
            try:
                source_data = self.cache.get(cache_key)
            except KeyError:
                pass

        if not source_data:
            clouds_yaml_path = self._config_data.get('clouds_yaml_path')
            if clouds_yaml_path:
                config_files = (clouds_yaml_path + client_config.CONFIG_FILES)
            else:
                config_files = None

            # Redirect logging to stderr so it does not mix with output,
            # in particular ansible-inventory JSON output
            # TODO(mordred) Integrate openstack's logging with ansible's logging
            sdk.enable_logging(stream=sys.stderr)

            cloud_inventory = sdk_inventory.OpenStackInventory(
                config_files=config_files,
                private=self._config_data.get('private', False))
            only_clouds = self._config_data.get('only_clouds', [])
            if only_clouds and not isinstance(only_clouds, list):
                raise ValueError(
                    'OpenStack Inventory Config Error: only_clouds must be'
                    ' a list')
            if only_clouds:
                new_clouds = []
                for cloud in cloud_inventory.clouds:
                    if cloud.name in only_clouds:
                        new_clouds.append(cloud)
                cloud_inventory.clouds = new_clouds

            expand_hostvars = self._config_data.get('expand_hostvars', False)
            fail_on_errors = self._config_data.get('fail_on_errors', False)

            source_data = cloud_inventory.list_hosts(
                expand=expand_hostvars, fail_on_cloud_config=fail_on_errors)

            if self.cache is not None:
                self.cache.set(cache_key, source_data)

        self._populate_from_source(source_data)
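
The only_clouds filter above is written as an explicit loop; an equivalent comprehension, shown only to clarify what it keeps:

cloud_inventory.clouds = [
    cloud for cloud in cloud_inventory.clouds if cloud.name in only_clouds
]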
Example #14
 def _get_file_contents(self, path):
     if path in self._file_mapping:
         return (self._file_mapping[path], False)
     else:
         raise AnsibleParserError("file not found: %s" % path)
Example #15
    def parse(self, inventory, loader, path, cache=True):

        super(InventoryModule, self).parse(inventory, loader, path)

        cache_key = self.get_cache_prefix(path)

        # file is config file
        try:
            self._config_data = self.loader.load_from_file(path)
        except Exception as e:
            raise AnsibleParserError(e)

        msg = ''
        if not self._config_data:
            msg = 'File empty. this is not my config file'
        elif 'plugin' in self._config_data and self._config_data[
                'plugin'] != self.NAME:
            msg = 'plugin config file, but not for us: %s' % self._config_data[
                'plugin']
        elif 'plugin' not in self._config_data and 'clouds' not in self._config_data:
            msg = "it's not a plugin configuration nor a clouds.yaml file"
        elif not HAS_SHADE:
            msg = "shade is required for the OpenStack inventory plugin. OpenStack inventory sources will be skipped."

        if msg:
            raise AnsibleParserError(msg)

        # The user has pointed us at a clouds.yaml file. Use defaults for
        # everything.
        if 'clouds' in self._config_data:
            self._config_data = {}

        source_data = None
        if cache and cache_key in inventory.cache:
            try:
                source_data = inventory.cache[cache_key]
            except KeyError:
                pass

        if not source_data:
            clouds_yaml_path = self._config_data.get('clouds_yaml_path')
            if clouds_yaml_path:
                config_files = (clouds_yaml_path +
                                os_client_config.config.CONFIG_FILES)
            else:
                config_files = None

            # TODO(mordred) Integrate shade's logging with ansible's logging
            shade.simple_logging()

            cloud_inventory = shade.inventory.OpenStackInventory(
                config_files=config_files,
                private=self._config_data.get('private', False))
            only_clouds = self._config_data.get('only_clouds', [])
            if only_clouds and not isinstance(only_clouds, list):
                raise ValueError(
                    'OpenStack Inventory Config Error: only_clouds must be'
                    ' a list')
            if only_clouds:
                new_clouds = []
                for cloud in cloud_inventory.clouds:
                    if cloud.name in only_clouds:
                        new_clouds.append(cloud)
                cloud_inventory.clouds = new_clouds

            expand_hostvars = self._config_data.get('expand_hostvars', False)
            fail_on_errors = self._config_data.get('fail_on_errors', False)

            source_data = cloud_inventory.list_hosts(
                expand=expand_hostvars, fail_on_cloud_config=fail_on_errors)

            inventory.cache[cache_key] = source_data

        self._populate_from_source(source_data)
Example #16
    def parse(self, inventory, loader, path, cache=True):

        if not HAS_GOOGLE_LIBRARIES:
            raise AnsibleParserError('gce inventory plugin cannot start: %s' %
                                     missing_required_lib('google-auth'))

        super(InventoryModule, self).parse(inventory, loader, path)

        config_data = {}
        config_data = self._read_config_data(path)

        if self.get_option('use_contrib_script_compatible_sanitization'):
            self._sanitize_group_name = self._legacy_script_compatible_group_sanitization

        # setup parameters as expected by 'fake module class' to reuse module_utils w/o changing the API
        params = {
            'filters': self.get_option('filters'),
            'projects': self.get_option('projects'),
            'scopes': self.get_option('scopes'),
            'zones': self.get_option('zones'),
            'auth_kind': self.get_option('auth_kind'),
            'service_account_file': self.get_option('service_account_file'),
            'service_account_contents':
            self.get_option('service_account_contents'),
            'service_account_email': self.get_option('service_account_email'),
        }

        self.fake_module = GcpMockModule(params)
        self.auth_session = GcpSession(self.fake_module, 'compute')

        query = self._get_query_options(params['filters'])

        if self.get_option('retrieve_image_info'):
            project_disks = self._get_project_disks(config_data, query)
        else:
            project_disks = None

        # Cache logic
        if cache:
            cache = self.get_option('cache')
            cache_key = self.get_cache_key(path)
        else:
            cache_key = None

        cache_needs_update = False
        if cache:
            try:
                results = self._cache[cache_key]
                for project in results:
                    for zone in results[project]:
                        self._add_hosts(results[project][zone],
                                        config_data,
                                        False,
                                        project_disks=project_disks)
            except KeyError:
                cache_needs_update = True

        if not cache or cache_needs_update:
            cached_data = {}
            for project in params['projects']:
                cached_data[project] = {}
                params['project'] = project
                zones = params['zones']
                # Fetch all instances
                link = self._instances % project
                resp = self.fetch_list(params, link, query)
                for key, value in resp.get('items').items():
                    if 'instances' in value:
                        # Key is in format: "zones/europe-west1-b"
                        zone = key[6:]
                        if not zones or zone in zones:
                            self._add_hosts(value['instances'],
                                            config_data,
                                            project_disks=project_disks)
                            cached_data[project][zone] = value['instances']

        if cache_needs_update:
            self._cache[cache_key] = cached_data
Example #17
    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables.  Run basic types (from isa) as well as
        any _post_validate_<foo> functions.
        '''

        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')

        for (name, attribute) in iteritems(self._get_base_attributes()):

            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError(
                        "the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in (
                    'Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue

            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))

                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    setattr(self, name, attribute.default)
                    continue

                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = to_unicode(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa == 'list':
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            value = [value]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError(
                                        "the field '%s' should be a list of %s,"
                                        " but the item '%s' is a %s" %
                                        (name, attribute.listof, item,
                                         type(item)),
                                        obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError(
                                            "the field '%s' is required, and cannot have empty values"
                                            % (name, ),
                                            obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        else:
                            if not isinstance(value, (list, set)):
                                value = [value]
                            if not isinstance(value, set):
                                value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)

                # and assign the massaged value back to the attribute field
                setattr(self, name, value)

            except (TypeError, ValueError) as e:
                raise AnsibleParserError(
                    "the field '%s' has an invalid value (%s), and could not be converted to an %s."
                    " Error was: %s" % (name, value, attribute.isa, e),
                    obj=self.get_ds())
            except UndefinedError as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    raise AnsibleParserError(
                        "the field '%s' has an invalid value, which appears to include a variable that is undefined."
                        " The error was: %s" % (name, e),
                        obj=self.get_ds())
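
The 'percent' branch in post_validate accepts either a bare number or a string with a trailing '%'. A minimal standalone sketch of just that coercion, outside the FieldAttribute machinery:

def to_percent(value):
    # mirror the 'percent' isa handling above: strip an optional '%' and coerce to float
    if isinstance(value, str) and '%' in value:
        value = value.replace('%', '')
    return float(value)

assert to_percent('30%') == 30.0
assert to_percent(0.5) == 0.5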
Example #18
0
    def parse(self, inventory, loader, path, cache=True):

        super(InventoryModule, self).parse(inventory, loader, path)

        cache_key = self._get_cache_prefix(path)

        # file is config file
        self._config_data = self._read_config_data(path)

        msg = ''
        if not self._config_data:
            msg = 'File empty. This is not my config file'
        elif 'plugin' in self._config_data and self._config_data[
                'plugin'] not in (self.NAME, 'openstack'):
            msg = 'plugin config file, but not for us: %s' % self._config_data[
                'plugin']
        elif 'plugin' not in self._config_data and 'clouds' not in self._config_data:
            msg = "it's not a plugin configuration nor a clouds.yaml file"
        elif not HAS_SDK:
            msg = "openstacksdk is required for the OpenStack inventory plugin. OpenStack inventory sources will be skipped."

        if msg:
            display.vvvv(msg)
            raise AnsibleParserError(msg)

        if 'clouds' in self._config_data:
            self.display.vvvv(
                "Found clouds config file instead of plugin config. "
                "Using default configuration.")
            self._config_data = {}

        # update cache if the user has caching enabled and the cache is being refreshed
        # will update variable below in the case of an expired cache
        cache_needs_update = not cache and self.get_option('cache')

        if cache:
            cache = self.get_option('cache')
        source_data = None
        if cache:
            self.display.vvvv("Reading inventory data from cache: %s" %
                              cache_key)
            try:
                source_data = self._cache[cache_key]
            except KeyError:
                # cache expired or doesn't exist yet
                display.vvvv("Inventory data cache not found")
                cache_needs_update = True

        if not source_data:
            self.display.vvvv("Getting hosts from Openstack clouds")
            clouds_yaml_path = self._config_data.get('clouds_yaml_path')
            if clouds_yaml_path:
                config_files = (clouds_yaml_path + client_config.CONFIG_FILES)
            else:
                config_files = None

            # Redirect logging to stderr so it does not mix with output,
            # in particular ansible-inventory JSON output
            # TODO(mordred) Integrate openstack's logging with ansible's logging
            if self.display.verbosity > 3:
                sdk.enable_logging(debug=True, stream=sys.stderr)
            else:
                sdk.enable_logging(stream=sys.stderr)

            cloud_inventory = sdk_inventory.OpenStackInventory(
                config_files=config_files,
                private=self._config_data.get('private', False))
            self.display.vvvv("Found %d cloud(s) in Openstack" %
                              len(cloud_inventory.clouds))
            only_clouds = self._config_data.get('only_clouds', [])
            if only_clouds and not isinstance(only_clouds, list):
                raise ValueError(
                    'OpenStack Inventory Config Error: only_clouds must be'
                    ' a list')
            if only_clouds:
                new_clouds = []
                for cloud in cloud_inventory.clouds:
                    self.display.vvvv("Looking at cloud : %s" % cloud.name)
                    if cloud.name in only_clouds:
                        self.display.vvvv("Selecting cloud : %s" % cloud.name)
                        new_clouds.append(cloud)
                cloud_inventory.clouds = new_clouds

            self.display.vvvv("Selected %d cloud(s)" %
                              len(cloud_inventory.clouds))

            expand_hostvars = self._config_data.get('expand_hostvars', False)
            fail_on_errors = self._config_data.get('fail_on_errors', False)
            all_projects = self._config_data.get('all_projects', False)

            source_data = []
            try:
                source_data = cloud_inventory.list_hosts(
                    expand=expand_hostvars,
                    fail_on_cloud_config=fail_on_errors,
                    all_projects=all_projects)
            except Exception as e:
                self.display.warning("Couldn't list Openstack hosts. "
                                     "See logs for details")
                # exceptions have no .message attribute on Python 3
                os_logger.error(to_native(e))
            finally:
                if cache_needs_update:
                    self._cache[cache_key] = source_data

        self._populate_from_source(source_data)
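
The cache handling above boils down to one decision: refresh the cache when caching is enabled but this run bypassed it, or when the cache was consulted and the key was absent or expired. A condensed restatement with plain booleans (this helper is illustrative, not part of the plugin API):

def needs_cache_update(cache_arg, cache_option, cache_hit):
    # refresh when caching is enabled but this parse run bypassed the cache,
    # or when the cache was read and the key was missing/expired
    return cache_option and (not cache_arg or not cache_hit)

assert needs_cache_update(cache_arg=False, cache_option=True, cache_hit=False)
assert not needs_cache_update(cache_arg=True, cache_option=True, cache_hit=True)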
Example #19
0
    def _load_playbook_data(self, file_name, variable_manager, vars=None):

        if os.path.isabs(file_name):
            self._basedir = os.path.dirname(file_name)
        else:
            self._basedir = os.path.normpath(
                os.path.join(self._basedir, os.path.dirname(file_name)))

        # set the loaders basedir
        cur_basedir = self._loader.get_basedir()
        self._loader.set_basedir(self._basedir)

        self._file_name = file_name

        # dynamically load any plugins from the playbook directory
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join(self._basedir, obj.subdir)
                if os.path.isdir(to_bytes(plugin_path)):
                    obj.add_directory(plugin_path)

        try:
            ds = self._loader.load_from_file(os.path.basename(file_name))
        except UnicodeDecodeError as e:
            raise AnsibleParserError(
                "Could not read playbook (%s) due to encoding issues: %s" %
                (file_name, to_native(e)))

        # check for errors and restore the basedir in case this error is caught and handled
        if not ds:
            self._loader.set_basedir(cur_basedir)
            raise AnsibleParserError("Empty playbook, nothing to do", obj=ds)
        elif not isinstance(ds, list):
            self._loader.set_basedir(cur_basedir)
            raise AnsibleParserError(
                "A playbook must be a list of plays, got a %s instead" %
                type(ds),
                obj=ds)

        # Parse the playbook entries. For plays, we simply parse them
        # using the Play() object, and includes are parsed using the
        # PlaybookInclude() object
        for entry in ds:
            if not isinstance(entry, dict):
                # restore the basedir in case this error is caught and handled
                self._loader.set_basedir(cur_basedir)
                raise AnsibleParserError(
                    "playbook entries must be either a valid play or an include statement",
                    obj=entry)

            if any(action in entry
                   for action in ('import_playbook', 'include')):
                if 'include' in entry:
                    display.deprecated(
                        "'include' for playbook includes. You should use 'import_playbook' instead",
                        version="2.12")
                pb = PlaybookInclude.load(entry,
                                          basedir=self._basedir,
                                          variable_manager=variable_manager,
                                          loader=self._loader)
                if pb is not None:
                    self._entries.extend(pb._entries)
                else:
                    which = entry.get('import_playbook',
                                      entry.get('include', entry))
                    display.display(
                        "skipping playbook '%s' due to conditional test failure"
                        % which,
                        color=C.COLOR_SKIP)
            else:
                entry_obj = Play.load(entry,
                                      variable_manager=variable_manager,
                                      loader=self._loader,
                                      vars=vars)
                self._entries.append(entry_obj)

        # we're done, so restore the old basedir in the loader
        self._loader.set_basedir(cur_basedir)
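
The loop above accepts two entry shapes: a play, handled by Play.load(), and a playbook include, handled by PlaybookInclude.load(), with the 'include' spelling deprecated in favor of 'import_playbook'. As parsed data structures, the shapes look roughly like:

# illustrative playbook entries as the loop above sees them
play_entry = {'hosts': 'all', 'tasks': []}              # parsed by Play.load()
import_entry = {'import_playbook': 'other.yml'}         # parsed by PlaybookInclude.load()
legacy_entry = {'include': 'other.yml'}                 # same, but triggers the deprecation warning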
Example #20
0
    def parse(self, inventory, loader, path, cache=False):

        if self._nmap is None:
            raise AnsibleParserError(
                'nmap inventory plugin requires the nmap cli tool to work')

        super(InventoryModule, self).parse(inventory,
                                           loader,
                                           path,
                                           cache=cache)

        self._read_config_data(path)

        # setup command
        cmd = [self._nmap]
        if not self._options['ports']:
            cmd.append('-sP')

        if self._options['ipv4'] and not self._options['ipv6']:
            cmd.append('-4')
        elif self._options['ipv6'] and not self._options['ipv4']:
            cmd.append('-6')
        elif not self._options['ipv6'] and not self._options['ipv4']:
            raise AnsibleParserError(
                'One of ipv4 or ipv6 must be enabled for this plugin')

        if self._options['exclude']:
            cmd.append('--exclude')
            cmd.append(','.join(self._options['exclude']))

        cmd.append(self._options['address'])
        try:
            # execute
            p = Popen(cmd, stdout=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate()
            if p.returncode != 0:
                raise AnsibleParserError('Failed to run nmap, rc=%s: %s' %
                                         (p.returncode, to_native(stderr)))

            # parse results
            host = None
            ip = None
            ports = []
            for line in stdout.splitlines():
                hits = self.find_host.match(line)
                if hits:
                    if host is not None:
                        self.inventory.set_variable(host, 'ports', ports)

                    # if dns only shows arpa, just use ip instead as hostname
                    if hits.group(1).endswith('.in-addr.arpa'):
                        host = hits.group(2)
                    else:
                        host = hits.group(1)

                    ip = hits.group(2)

                    if host is not None:
                        # update inventory
                        self.inventory.add_host(host)
                        self.inventory.set_variable(host, 'ip', ip)
                        ports = []
                    continue

                host_ports = self.find_port.match(line)
                if host is not None and host_ports:
                    ports.append({
                        'port': host_ports.group(1),
                        'protocol': host_ports.group(2),
                        'state': host_ports.group(3),
                        'service': host_ports.group(4)
                    })
                    continue

                # TODO: parse more data, OS?

            # if any leftovers
            if host and ports:
                self.inventory.set_variable(host, 'ports', ports)

        except Exception as e:
            raise AnsibleParserError("failed to parse %s: %s " %
                                     (to_native(path), to_native(e)))
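
This parse method relies on two precompiled patterns, find_host and find_port, that are not shown in this excerpt. A rough approximation of what they match, against typical nmap output lines (these exact regexes are an assumption, not the plugin's own):

import re

# hypothetical stand-ins for the plugin's find_host / find_port attributes
find_host = re.compile(r'^Nmap scan report for ([\w.\-]+)(?: \(([\d.]+)\))?')
find_port = re.compile(r'^(\d+)/(tcp|udp)\s+(\w+)\s+(\S+)')

m = find_host.match('Nmap scan report for example.com (93.184.216.34)')
assert m.group(1) == 'example.com' and m.group(2) == '93.184.216.34'
assert find_port.match('80/tcp open http').group(3) == 'open'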
Example #21
0
    def parse_source(self, source, cache=True):
        ''' Generate or update inventory for the source provided '''

        parsed = False
        display.debug(u'Examining possible inventory source: %s' % source)

        b_source = to_bytes(source)
        # process directories as a collection of inventories
        if os.path.isdir(b_source):
            display.debug(u'Searching for inventory files in directory: %s' %
                          source)
            for i in sorted(os.listdir(b_source)):

                display.debug(u'Considering %s' % i)
                # Skip hidden files and stuff we explicitly ignore
                if IGNORED.search(i):
                    continue

                # recursively deal with directory entries
                fullpath = os.path.join(b_source, i)
                parsed_this_one = self.parse_source(to_native(fullpath))
                display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
                if not parsed:
                    parsed = parsed_this_one
        else:
            # left with strings or files, let plugins figure it out

            # set so new hosts can use it for the inventory_file/dir vars
            self._inventory.current_source = source

            # get inventory plugins if needed, there should always be at least one generator
            if not self._inventory_plugins:
                self._setup_inventory_plugins()

            # try source with each plugin
            failures = []
            for plugin in self._inventory_plugins:
                plugin_name = to_native(
                    getattr(plugin, '_load_name',
                            getattr(plugin, '_original_path', '')))
                display.debug(u'Attempting to use plugin %s (%s)' %
                              (plugin_name, plugin._original_path))

                # initialize
                if plugin.verify_file(source):
                    try:
                        plugin.parse(self._inventory,
                                     self._loader,
                                     source,
                                     cache=cache)
                        parsed = True
                        display.vvv(
                            'Parsed %s inventory source with %s plugin' %
                            (to_native(source), plugin_name))
                        break
                    except AnsibleParserError as e:
                        display.debug('%s did not meet %s requirements' %
                                      (to_native(source), plugin_name))
                        failures.append({
                            'src': source,
                            'plugin': plugin_name,
                            'exc': e
                        })
                else:
                    display.debug('%s did not meet %s requirements' %
                                  (to_native(source), plugin_name))
            else:
                if not parsed and failures:
                    # only if no plugin processed files should we show errors.
                    if C.INVENTORY_UNPARSED_IS_FAILED:
                        msg = "Could not parse inventory source %s with available plugins:\n" % source
                        for fail in failures:
                            msg += 'Plugin %s failed: %s\n' % (
                                fail['plugin'], to_native(fail['exc']))
                            if display.verbosity >= 3:
                                msg += "%s\n" % fail['exc'].tb
                        raise AnsibleParserError(msg)
                    else:
                        for fail in failures:
                            display.warning(
                                '\n* Failed to parse %s with %s plugin: %s' %
                                (to_native(fail['src']), fail['plugin'],
                                 to_native(fail['exc'])))
                            display.vvv(fail['exc'].tb)

        if not parsed:
            display.warning("Unable to parse %s as an inventory source" %
                            to_native(source))

        # clean up, just in case
        self._inventory.current_source = None

        return parsed
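
Note the for/else on the plugin loop above: the else suite runs only when the loop completes without break, i.e. when no plugin successfully parsed the source, which is what gates the failure reporting. A minimal illustration of that control flow:

# for/else: the else suite runs only if the loop was never exited via break
for plugin in ('host_list', 'script', 'yaml'):
    if plugin == 'yaml':
        print('parsed with', plugin)
        break
else:
    print('no plugin could parse the source')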
Example #22
0
    def get_vars(self, loader, path, entities, cache=True):
        ''' loads vars for Host and Group entities from a GraphQL API '''

        if not isinstance(entities, list):
            entities = [entities]

        super(VarsModule, self).get_vars(loader, path, entities)

        if 'api_server' not in OPTIONS.keys():
            self.loader = loader
            config_file_path = path + "/graphql_plugin.yaml"
            self.display.v('Load vars plugin configuration file {}'.format(
                config_file_path))

            if self.verify_file(config_file_path):
                self.parse_config_file(config_file_path)
            else:
                return {}

        self.api_server = OPTIONS['api_server']
        self.api_token = OPTIONS['api_token']

        data = {}
        for entity in entities:
            if isinstance(entity, Host):
                subdir = 'host_vars'
            elif isinstance(entity, Group):
                subdir = 'group_vars'
            else:
                raise AnsibleParserError(
                    "Supplied entity must be Host or Group, got %s instead" %
                    (type(entity)))

            if isinstance(entity, Group):
                key = "Group_%s" % entity.name
                if cache and key in FOUND:
                    self.display.v('Load vars from cache')
                    new_data = FOUND[key]
                else:
                    self.display.v('Load vars from graphql api {}'.format(
                        self.api_server))
                    try:

                        # Select your transport with a defined url endpoint
                        transport = AIOHTTPTransport(
                            url="https://{}/graphql".format(self.api_server))

                        # Create a GraphQL client using the defined transport
                        client = Client(transport=transport,
                                        fetch_schema_from_transport=True)

                        # Provide a GraphQL query
                        query = gql('''
                            query {
                                groupByName(groupName: "''' + entity.name +
                                    '''")
                                {
                                    ansible_group_name
                                    variables
                                }
                            }
                        ''')

                        # Execute the query on the transport
                        new_data = client.execute(query)

                        if new_data["groupByName"] != None:
                            new_data = new_data["groupByName"]["variables"]
                        else:
                            new_data = {}

                        FOUND[key] = new_data

                    except Exception as e:
                        raise AnsibleParserError(e)

                data = combine_vars(data, new_data)

        return data
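
The query above splices entity.name into the GraphQL document by string concatenation. The gql client also accepts variables separately via variable_values, which sidesteps quoting issues; a sketch against the same hypothetical groupByName schema (the URL is a placeholder):

from gql import gql, Client
from gql.transport.aiohttp import AIOHTTPTransport

transport = AIOHTTPTransport(url='https://api.example.com/graphql')
client = Client(transport=transport, fetch_schema_from_transport=True)

# same hypothetical schema as above, parameterized instead of concatenated
query = gql('''
    query ($name: String!) {
        groupByName(groupName: $name) {
            ansible_group_name
            variables
        }
    }
''')
result = client.execute(query, variable_values={'name': 'webservers'})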
Example #23
0
 def set_provider(self):
     if 'provider' not in self.stage:
         raise AnsibleParserError('provider must be specified')
     self.provider = self.stage['provider']
     if self.provider not in [
             'vagrant', 'aws', 'azure', 'gcp', 'openstack', 'prepared',
             'kickstart'
     ]:
         raise AnsibleParserError(
             f'provider must be one of "vagrant", "aws", "azure", "gcp", "openstack", "prepared", "kickstart", but specified {self.provider}'
         )
     if self.provider != 'kickstart' and 'kickstart_config' in self.stage:
         raise AnsibleParserError(
             'kickstart_config cannot be specified when provider is not kickstart'
         )
     if self.provider == 'vagrant':
         if 'instance_type' in self.stage:
             raise AnsibleParserError(
                 'instance_type cannot be specified when provider is vagrant'
             )
         if 'region' in self.stage:
             raise AnsibleParserError(
                 'region cannot be specified when provider is vagrant')
     else:
         if 'prepared_resource_group' in self.stage and self.provider != 'azure':
             raise AnsibleParserError(
                 'prepared_resource_group is only available when provider is "azure"'
             )
         if 'not_support_az' in self.stage and self.provider != 'azure':
             raise AnsibleParserError(
                 'not_support_az is only available when provider is "azure"'
             )
         if self.provider not in ['kickstart', 'prepared'
                                  ] and 'memory_size' in self.stage:
             raise AnsibleParserError(
                 'memory_size cannot be specified when provider is IaaS')
         if 'repository_memory_size' in self.stage:
             raise AnsibleParserError(
                 'repository_memory_size cannot be specified when provider is IaaS'
             )
         if 'dev' in self.stage:
             raise AnsibleParserError(
                 'dev cannot be specified when provider is IaaS')
         if 'bridge' in self.stage:
             raise AnsibleParserError(
                 'bridge cannot be specified when provider is IaaS')
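
A few illustrative stage dicts and how the validation above treats them (error texts abbreviated):

# hypothetical inputs to set_provider() and the expected outcomes
ok_stage = {'provider': 'aws', 'instance_type': 't3.small'}    # accepted
bad_stage = {'provider': 'vagrant', 'region': 'us-east-1'}     # AnsibleParserError: region cannot be specified ...
no_provider = {}                                               # AnsibleParserError: provider must be specified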
Example #24
0
    def get_vars(self,
                 play=None,
                 host=None,
                 task=None,
                 include_hostvars=True,
                 include_delegate_to=True,
                 use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - vars_cache[host] (if there is a host context)
        - extra vars
        '''

        display.debug("in VariableManager get_vars()")

        all_vars = dict()
        magic_variables = self._get_magic_variables(
            play=play,
            host=host,
            task=task,
            include_hostvars=include_hostvars,
            include_delegate_to=include_delegate_to,
        )

        # default for all cases
        basedirs = []
        if self.safe_basedir:  # avoid adhoc/console loading cwd
            basedirs = [self._loader.get_basedir()]

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = combine_vars(all_vars, role.get_default_vars())

        if task:
            # set basedirs
            if C.PLAYBOOK_VARS_ROOT == 'all':  # should be default
                basedirs = task.get_search_path()
            elif C.PLAYBOOK_VARS_ROOT in (
                    'bottom', 'playbook_dir'):  # only option in 2.4.0
                basedirs = [task.get_search_path()[0]]
            elif C.PLAYBOOK_VARS_ROOT != 'top':
                # preserves default basedirs, only option pre 2.3
                raise AnsibleError('Unknown playbook vars logic: %s' %
                                   C.PLAYBOOK_VARS_ROOT)

            # if we have a task in this context, and that task has a role, make
            # sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its roles default vars
            if task._role is not None and (play
                                           or task.action == 'include_role'):
                all_vars = combine_vars(
                    all_vars,
                    task._role.get_default_vars(
                        dep_chain=task.get_dep_chain()))

        if host:
            # The 'all' group and the rest of the groups for a host, used below
            all_group = self._inventory.groups.get('all')
            host_groups = sort_groups(
                [g for g in host.get_groups() if g.name not in ['all']])

            def _get_plugin_vars(plugin, path, entities):
                data = {}
                try:
                    data = plugin.get_vars(self._loader, path, entities)
                except AttributeError:
                    try:
                        for entity in entities:
                            if isinstance(entity, Host):
                                data.update(plugin.get_host_vars(entity.name))
                            else:
                                data.update(plugin.get_group_vars(entity.name))
                    except AttributeError:
                        if hasattr(plugin, 'run'):
                            raise AnsibleError(
                                "Cannot use v1 type vars plugin %s from %s" %
                                (plugin._load_name, plugin._original_path))
                        else:
                            raise AnsibleError(
                                "Invalid vars plugin %s from %s" %
                                (plugin._load_name, plugin._original_path))
                return data

            # internal functions that actually do the work
            def _plugins_inventory(entities):
                ''' merges all entities by inventory source '''
                data = {}
                for inventory_dir in self._inventory._sources:
                    if ',' in inventory_dir and not os.path.exists(
                            inventory_dir):  # skip host lists
                        continue
                    elif not os.path.isdir(
                            inventory_dir
                    ):  # always pass 'inventory directory'
                        inventory_dir = os.path.dirname(inventory_dir)

                    for plugin in vars_loader.all():

                        data = combine_vars(
                            data,
                            _get_plugin_vars(plugin, inventory_dir, entities))
                return data

            def _plugins_play(entities):
                ''' merges all entities adjacent to play '''
                data = {}
                for plugin in vars_loader.all():

                    for path in basedirs:
                        data = combine_vars(
                            data, _get_plugin_vars(plugin, path, entities))
                return data

            # configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
            def all_inventory():
                return all_group.get_vars()

            def all_plugins_inventory():
                return _plugins_inventory([all_group])

            def all_plugins_play():
                return _plugins_play([all_group])

            def groups_inventory():
                ''' gets group vars from inventory '''
                return get_group_vars(host_groups)

            def groups_plugins_inventory():
                ''' gets plugin sources from inventory for groups '''
                return _plugins_inventory(host_groups)

            def groups_plugins_play():
                ''' gets plugin sources from play for groups '''
                return _plugins_play(host_groups)

            def plugins_by_groups():
                '''
                    merges all plugin sources by group;
                    use this instead of, NOT in combination with, the other groups_plugins* functions
                '''
                data = {}
                for group in host_groups:
                    # data.get avoids a KeyError on the first merge for each
                    # group; the helpers above expect a list of entities
                    data[group] = combine_vars(data.get(group, {}),
                                               _plugins_inventory([group]))
                    data[group] = combine_vars(data[group],
                                               _plugins_play([group]))
                return data

            # Merge groups as per precedence config
            # only allow to call the functions we want exposed
            for entry in C.VARIABLE_PRECEDENCE:
                if entry in self._ALLOWED:
                    display.debug('Calling %s to load vars for %s' %
                                  (entry, host.name))
                    all_vars = combine_vars(all_vars, locals()[entry]())
                else:
                    display.warning(
                        'Ignoring unknown variable precedence entry: %s' %
                        (entry))

            # host vars, from inventory, inventory adjacent and play adjacent via plugins
            all_vars = combine_vars(all_vars, host.get_vars())
            all_vars = combine_vars(all_vars, _plugins_inventory([host]))
            all_vars = combine_vars(all_vars, _plugins_play([host]))

            # finally, the facts caches for this host, if it exists
            # TODO: cleaning of facts should eventually become part of taskresults instead of vars
            try:
                facts = wrap_var(self._fact_cache.get(host.name, {}))
                all_vars.update(namespace_facts(facts))

                # push facts to main namespace
                if C.INJECT_FACTS_AS_VARS:
                    all_vars = combine_vars(all_vars,
                                            wrap_var(clean_facts(facts)))
                else:
                    # always 'promote' ansible_local
                    all_vars = combine_vars(
                        all_vars,
                        wrap_var(
                            {'ansible_local': facts.get('ansible_local', {})}))
            except KeyError:
                pass

        if play:
            all_vars = combine_vars(all_vars, play.get_vars())

            vars_files = play.get_vars_files()
            try:
                for vars_file_item in vars_files:
                    # create a set of temporary vars here, which incorporate the extra
                    # and magic vars so we can properly template the vars_files entries
                    temp_vars = combine_vars(all_vars, self._extra_vars)
                    temp_vars = combine_vars(temp_vars, magic_variables)
                    self._templar.set_available_variables(temp_vars)

                    # we assume each item in the list is itself a list, as we
                    # support "conditional includes" for vars_files, which mimics
                    # the with_first_found mechanism.
                    vars_file_list = vars_file_item
                    if not isinstance(vars_file_list, list):
                        vars_file_list = [vars_file_list]

                    # now we iterate through the (potential) files, and break out
                    # as soon as we read one from the list. If none are found, we
                    # raise an error, which is silently ignored at this point.
                    try:
                        for vars_file in vars_file_list:
                            vars_file = self._templar.template(vars_file)
                            if not (isinstance(vars_file, Sequence)):
                                raise AnsibleError(
                                    "Invalid vars_files entry found: %r\n"
                                    "vars_files entries should be either a string type or "
                                    "a list of string types after template expansion"
                                    % vars_file)
                            try:
                                data = preprocess_vars(
                                    self._loader.load_from_file(vars_file,
                                                                unsafe=True))
                                if data is not None:
                                    for item in data:
                                        all_vars = combine_vars(all_vars, item)
                                break
                            except AnsibleFileNotFound:
                                # we continue on loader failures
                                continue
                            except AnsibleParserError:
                                raise
                        else:
                            # if include_delegate_to is set to False, we ignore the missing
                            # vars file here because we're working on a delegated host
                            if include_delegate_to:
                                raise AnsibleFileNotFound(
                                    "vars file %s was not found" %
                                    vars_file_item)
                    except (UndefinedError, AnsibleUndefinedVariable):
                        if host is not None and self._fact_cache.get(
                                host.name, dict()).get(
                                    'module_setup') and task is not None:
                            raise AnsibleUndefinedVariable(
                                "an undefined variable was found when attempting to template the vars_files item '%s'"
                                % vars_file_item,
                                obj=vars_file_item)
                        else:
                            # we do not have a full context here, and the missing variable could be because of that
                            # so just show a warning and continue
                            display.vvv(
                                "skipping vars_file '%s' due to an undefined variable"
                                % vars_file_item)
                            continue

                    display.vvv("Read vars_file '%s'" % vars_file_item)
            except TypeError:
                raise AnsibleParserError(
                    "Error while reading vars files - please supply a list of file names. "
                    "Got '%s' of type %s" % (vars_files, type(vars_files)))

            # By default, we now merge in all vars from all roles in the play,
            # unless the user has disabled this via a config option
            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = combine_vars(
                        all_vars, role.get_vars(include_params=False))

        # next, we merge in the vars from the role, which will specifically
        # follow the role dependency chain, and then we merge in the tasks
        # vars (which will look at parent blocks/task includes)
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars,
                    task._role.get_vars(task.get_dep_chain(),
                                        include_params=False))
            all_vars = combine_vars(all_vars, task.get_vars())

        # next, we merge in the vars cache (include vars) and nonpersistent
        # facts cache (set_fact/register), in that order
        if host:
            # include_vars non-persistent cache
            all_vars = combine_vars(
                all_vars, self._vars_cache.get(host.get_name(), dict()))
            # fact non-persistent cache
            all_vars = combine_vars(
                all_vars,
                self._nonpersistent_fact_cache.get(host.name, dict()))

        # next, we merge in role params and task include params
        if task:
            if task._role:
                all_vars = combine_vars(
                    all_vars, task._role.get_role_params(task.get_dep_chain()))

            # special case for include tasks, where the include params
            # may be specified in the vars field for the task, which should
            # have higher precedence than the vars/np facts above
            all_vars = combine_vars(all_vars, task.get_include_params())

        # extra vars
        all_vars = combine_vars(all_vars, self._extra_vars)

        # magic variables
        all_vars = combine_vars(all_vars, magic_variables)

        # special case for the 'environment' magic variable, as someone
        # may have set it as a variable and we don't want to stomp on it
        if task:
            all_vars['environment'] = task.environment

        # if we have a task and we're delegating to another host, figure out the
        # variables for that host now so we don't have to rely on hostvars later
        if task and task.delegate_to is not None and include_delegate_to:
            all_vars['ansible_delegated_vars'], all_vars[
                '_ansible_loop_cache'] = self._get_delegated_vars(
                    play, task, all_vars)

        # 'vars' magic var
        if task or play:
            # has to be copy, otherwise recursive ref
            all_vars['vars'] = all_vars.copy()

        display.debug("done with get_vars()")
        return all_vars
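
Each combine_vars call above merges a new source over what has been accumulated so far, with the later source winning on key conflicts (under the default hash_behaviour=replace), so the call order in this method is the precedence order. A toy illustration of that "later wins" merging:

# toy stand-in for combine_vars under the default replace behaviour
def combine(a, b):
    merged = a.copy()
    merged.update(b)
    return merged

all_vars = {}
all_vars = combine(all_vars, {'x': 'from group vars'})
all_vars = combine(all_vars, {'x': 'from host vars'})
all_vars = combine(all_vars, {'x': 'from extra vars'})
assert all_vars['x'] == 'from extra vars'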
Example #25
0
    def add_hives(self):
        separate_repository = self.stage.get('separate_repository', True)
        number_of_hosts = self.stage.get('number_of_hosts',
                                         4 if separate_repository else 3)
        if 'ip_address_list' in self.stage:
            number_of_hosts = len(self.stage.get('ip_address_list'))
        for idx in range(number_of_hosts):
            host_name = f'{self.stage_prefix}hive{idx}.{self.name}'
            self.inventory.add_host(host_name, group=self.stage_name)
            if 'root_password' in self.stage:
                self.inventory.set_variable(host_name, 'hive_root_password',
                                            self.stage['root_password'])
            if 'internal_cidr' in self.stage:
                self.inventory.set_variable(host_name, 'hive_internal_cidr',
                                            self.stage['internal_cidr'])
            if 'internal_cidr_v6' in self.stage:
                self.inventory.set_variable(host_name, 'hive_internal_cidr_v6',
                                            self.stage['internal_cidr_v6'])
            if 'kms_key_id' in self.stage:
                self.inventory.set_variable(host_name, 'hive_kms_key_id',
                                            self.stage['kms_key_id'])
            if idx == number_of_hosts - 1:
                if not separate_repository:
                    self.inventory.add_host(host_name, group='hives')
                    if 'memory_size' in self.stage:
                        self.inventory.set_variable(host_name,
                                                    'hive_memory_size',
                                                    self.stage['memory_size'])
                    if 'cpus' in self.stage:
                        self.inventory.set_variable(host_name, 'hive_cpus',
                                                    self.stage['cpus'])
                    if 'disk_size' in self.stage:
                        self.inventory.set_variable(host_name,
                                                    'hive_disk_size',
                                                    self.stage['disk_size'])
                    if 'disk_encrypted' in self.stage:
                        self.inventory.set_variable(
                            host_name, 'hive_disk_encrypted',
                            self.stage['disk_encrypted'])
                    if 'instance_type' in self.stage:
                        self.inventory.set_variable(
                            host_name, 'hive_instance_type',
                            self.stage['instance_type'])
                    if 'mirrored_disk_size' in self.stage:
                        self.inventory.set_variable(
                            host_name, 'hive_mirrored_disk_size',
                            self.stage['mirrored_disk_size'])
                    if 'mirrored_disk_encrypted' in self.stage:
                        if 'kms_key_id' not in self.stage:
                            raise AnsibleParserError(
                                'mirrored_disk_encrypted requires kms_key_id')
                        self.inventory.set_variable(
                            host_name, 'hive_mirrored_disk_encrypted',
                            self.stage['mirrored_disk_encrypted'])
                self.inventory.add_host(host_name, group='repository')
                if 'repository_memory_size' in self.stage:
                    self.inventory.set_variable(
                        host_name, 'hive_memory_size',
                        self.stage['repository_memory_size'])
                if 'repository_cpus' in self.stage:
                    self.inventory.set_variable(host_name, 'hive_cpus',
                                                self.stage['repository_cpus'])
                if 'repository_disk_size' in self.stage:
                    self.inventory.set_variable(
                        host_name, 'hive_disk_size',
                        self.stage['repository_disk_size'])
                if 'repository_disk_encrypted' in self.stage:
                    if 'kms_key_id' not in self.stage:
                        raise AnsibleParserError(
                            'repository_disk_encrypted requires kms_key_id')
                    self.inventory.set_variable(
                        host_name, 'hive_disk_encrypted',
                        self.stage['repository_disk_encrypted'])
                if 'repository_instance_type' in self.stage:
                    self.inventory.set_variable(
                        host_name, 'hive_instance_type',
                        self.stage['repository_instance_type'])
            else:
                self.inventory.add_host(host_name, group='hives')
                if 'memory_size' in self.stage:
                    self.inventory.set_variable(host_name, 'hive_memory_size',
                                                self.stage['memory_size'])
                if 'cpus' in self.stage:
                    self.inventory.set_variable(host_name, 'hive_cpus',
                                                self.stage['cpus'])
                if 'disk_size' in self.stage:
                    self.inventory.set_variable(host_name, 'hive_disk_size',
                                                self.stage['disk_size'])
                if 'disk_encrypted' in self.stage:
                    if 'kms_key_id' not in self.stage:
                        raise AnsibleParserError(
                            'disk_encrypted requires kms_key_id')
                    self.inventory.set_variable(host_name,
                                                'hive_disk_encrypted',
                                                self.stage['disk_encrypted'])
                if 'instance_type' in self.stage:
                    self.inventory.set_variable(host_name,
                                                'hive_instance_type',
                                                self.stage['instance_type'])
                if 'mirrored_disk_size' in self.stage:
                    self.inventory.set_variable(
                        host_name, 'hive_mirrored_disk_size',
                        self.stage['mirrored_disk_size'])
                if 'mirrored_disk_encrypted' in self.stage:
                    self.inventory.set_variable(
                        host_name, 'hive_mirrored_disk_encrypted',
                        self.stage['mirrored_disk_encrypted'])

            if 'image_name' in self.stage:
                self.inventory.set_variable(host_name, 'hive_vm_image_name',
                                            self.stage['image_name'])

            subnet = self.subnets[idx % len(self.subnets)]
            if 'name' in subnet:
                self.inventory.set_variable(host_name, 'hive_subnet',
                                            subnet['name'])
            if 'region' in self.stage:
                az_suffix_list = self.stage.get('az_suffix_list',
                                                ['-a', '-b', '-c'])
                az_default = self.stage['region'] + az_suffix_list[
                    idx % len(az_suffix_list)]
                if self.provider == 'azure':
                    az_default = (idx % 3) + 1
                self.inventory.set_variable(
                    host_name, 'hive_available_zone',
                    subnet.get('available_zone', az_default))
            self.inventory.set_variable(host_name, 'hive_private_ip',
                                        next(subnet['ip_list']))
            self.inventory.set_variable(host_name, 'hive_netmask',
                                        subnet['netmask'])
        self.inventory.set_variable(
            'hives', 'hive_swarm_master',
            f'{self.stage_prefix}hive{os.getenv("HIVE_FIRST_HIVE")}.{self.name}'
        )
        self.inventory.add_host(
            f'{self.stage_prefix}hive{os.getenv("HIVE_FIRST_HIVE")}.{self.name}',
            group='first_hive')
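
Hosts above are distributed over subnets and availability-zone suffixes round-robin by index. A standalone sketch of that distribution (values are illustrative):

subnets = [{'name': 'subnet-a'}, {'name': 'subnet-b'}]
az_suffix_list = ['-a', '-b', '-c']
region = 'ap-northeast-1'

for idx in range(4):
    subnet = subnets[idx % len(subnets)]
    az = region + az_suffix_list[idx % len(az_suffix_list)]
    print(f'hive{idx}: subnet={subnet["name"]} az={az}')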
Example #26
0
    def parse(self, inventory, loader, path, cache=None):

        super(InventoryModule, self).parse(inventory, loader, path)
        self.set_options()

        if cache is None:
            cache = self.get_option('cache')

        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        cmd = [path, "--list"]

        try:
            cache_key = self._get_cache_prefix(path)
            if not cache or cache_key not in self._cache:
                try:
                    sp = subprocess.Popen(cmd,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
                except OSError as e:
                    raise AnsibleParserError("problem running %s (%s)" %
                                             (' '.join(cmd), to_native(e)))
                (stdout, stderr) = sp.communicate()

                path = to_native(path)
                err = to_native(stderr or "")

                if err and not err.endswith('\n'):
                    err += '\n'

                if sp.returncode != 0:
                    raise AnsibleError(
                        "Inventory script (%s) had an execution error: %s " %
                        (path, err))

                # make sure script output is unicode so that json loader will output unicode strings itself
                try:
                    data = to_text(stdout, errors="strict")
                except Exception as e:
                    raise AnsibleError(
                        "Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}"
                        .format(path, to_native(e)))

                try:
                    self._cache[cache_key] = self.loader.load(data,
                                                              json_only=True)
                except Exception as e:
                    raise AnsibleError(
                        "failed to parse executable inventory script results from {0}: {1}\n{2}"
                        .format(path, to_native(e), err))

                # if no other errors happened and you want to force displaying stderr, do so now
                if stderr and self.get_option('always_show_stderr'):
                    self.display.error(msg=to_text(err))

            processed = self._cache[cache_key]
            if not isinstance(processed, Mapping):
                raise AnsibleError(
                    "failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}"
                    .format(path, err))

            group = None
            data_from_meta = None

            # A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each # host.
            # This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.
            for (group, gdata) in processed.items():
                if group == '_meta':
                    if 'hostvars' in gdata:
                        data_from_meta = gdata['hostvars']
                else:
                    self._parse_group(group, gdata)

            for host in self._hosts:
                got = {}
                if data_from_meta is None:
                    got = self.get_host_variables(path, host)
                else:
                    try:
                        got = data_from_meta.get(host, {})
                    except AttributeError as e:
                        raise AnsibleError(
                            "Improperly formatted host information for %s: %s"
                            % (host, to_native(e)),
                            orig_exc=e)

                self._populate_host_vars([host], got)

        except Exception as e:
            raise AnsibleParserError(to_native(e))
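
The _meta.hostvars convention handled above lets a dynamic inventory script return all host variables from a single --list call, so Ansible never needs to invoke --host per host. The top-level JSON shape a script emits, shown as the parsed structure:

# shape of a script's --list output once loaded: groups plus an optional _meta block
processed = {
    'web': {'hosts': ['web1', 'web2'], 'vars': {'http_port': 80}},
    '_meta': {
        'hostvars': {
            'web1': {'ansible_host': '10.0.0.1'},
            'web2': {'ansible_host': '10.0.0.2'},
        },
    },
}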
Example #27
0
    def _parse_group(self, group, group_data):

        if isinstance(group_data, (MutableMapping, NoneType)):

            try:
                group = self.inventory.add_group(group)
            except AnsibleError as e:
                raise AnsibleParserError("Unable to add group %s: %s" %
                                         (group, to_text(e)))

            if group_data is not None:
                # make sure they are dicts
                for section in ['vars', 'children', 'hosts']:
                    if section in group_data:
                        # convert strings to dicts as these are allowed
                        if isinstance(group_data[section], string_types):
                            group_data[section] = {group_data[section]: None}

                        if not isinstance(group_data[section],
                                          (MutableMapping, NoneType)):
                            raise AnsibleParserError(
                                'Invalid "%s" entry for "%s" group, requires a dictionary, found "%s" instead.'
                                % (section, group, type(group_data[section])))

                for key in group_data:

                    if not isinstance(group_data[key],
                                      (MutableMapping, NoneType)):
                        self.display.warning(
                            'Skipping key (%s) in group (%s) as it is not a mapping, it is a %s'
                            % (key, group, type(group_data[key])))
                        continue

                    if isinstance(group_data[key], NoneType):
                        self.display.vvv(
                            'Skipping empty key (%s) in group (%s)' %
                            (key, group))
                    elif key == 'vars':
                        for var in group_data[key]:
                            self.inventory.set_variable(
                                group, var, group_data[key][var])
                    elif key == 'children':
                        for subgroup in group_data[key]:
                            subgroup = self._parse_group(
                                subgroup, group_data[key][subgroup])
                            self.inventory.add_child(group, subgroup)

                    elif key == 'hosts':
                        for host_pattern in group_data[key]:
                            hosts, port = self._parse_host(host_pattern)
                            self._populate_host_vars(
                                hosts, group_data[key][host_pattern] or {},
                                group, port)
                    else:
                        self.display.warning(
                            'Skipping unexpected key (%s) in group (%s), only "vars", "children" and "hosts" are valid'
                            % (key, group))

        else:
            self.display.warning(
                "Skipping '%s' as this is not a valid group definition" %
                group)

        return group
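
The string-to-dict conversion above means a bare string under vars, children, or hosts is treated as a single key with a null value. A standalone restatement of that normalization:

# a bare string under 'children' is normalized to {name: None}, as above
group_data = {'children': 'webservers'}
for section in ('vars', 'children', 'hosts'):
    if isinstance(group_data.get(section), str):
        group_data[section] = {group_data[section]: None}
assert group_data['children'] == {'webservers': None}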
Example #28
0
    def parse(self):
        '''
        Given a task in one of the supported forms, parses and returns
        the action, arguments, and delegate_to values for the
        task, dealing with all sorts of levels of fuzziness.
        '''

        thing      = None

        action      = None
        delegate_to = self._task_ds.get('delegate_to', None)
        args        = dict()


        # this is the 'extra gross' scenario detailed above, so we grab
        # the args and pass them in as additional arguments, which can/will
        # be overwritten via dict updates from the other arg sources below
        additional_args = self._task_ds.get('args', dict())

        # We can have one of action, local_action, or module specified
        # action
        if 'action' in self._task_ds:
            # an old school 'action' statement
            thing = self._task_ds['action']
            action, args = self._normalize_parameters(thing, additional_args=additional_args)

        # local_action
        if 'local_action' in self._task_ds:
            # local_action is similar but also implies a delegate_to
            if action is not None:
                raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
            thing = self._task_ds.get('local_action', '')
            delegate_to = 'localhost'
            action, args = self._normalize_parameters(thing, additional_args=additional_args)

        # module: <stuff> is the more new-style invocation

        # walk the input dictionary to see if we recognize a module name
        for (item, value) in iteritems(self._task_ds):
            if item in module_loader or item == 'meta' or item == 'include':
                # finding more than one module name is a problem
                if action is not None:
                    raise AnsibleParserError("conflicting action statements", obj=self._task_ds)
                action = item
                thing = value
                action, args = self._normalize_parameters(value, action=action, additional_args=additional_args)

        # if we didn't see any module in the task at all, it's not a task really
        if action is None:
            if 'ping' not in module_loader:
                raise AnsibleParserError("The requested action was not found in configured module paths. "
                        "Additionally, core modules are missing. If this is a checkout, "
                        "run 'git submodule update --init --recursive' to correct this problem.",
                        obj=self._task_ds)

            else:
                raise AnsibleParserError("no action detected in task", obj=self._task_ds)
        elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
            templar = Templar(loader=None)
            raw_params = args.pop('_raw_params')
            if templar._contains_vars(raw_params):
                args['_variable_params'] = raw_params
            else:
                raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES)), obj=self._task_ds)

        # shell modules require special handling
        (action, args) = self._handle_shell_weirdness(action, args)

        return (action, args, delegate_to)
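
To make the branches above concrete, here are hypothetical task dicts in each accepted shape (illustrative data only):

# old-school 'action' statement
old_style = {'action': 'ping data=hello'}

# 'local_action' parses the same way but forces delegate_to to 'localhost'
local = {'local_action': 'command uptime'}

# new-style invocation: the module name itself is the key
new_style = {'ping': 'data=hello'}

# combining forms is the "mutually exclusive" / "conflicting action
# statements" error raised above
conflicting = {'action': 'ping', 'local_action': 'ping'}
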
Example #29
    def parse(self, inventory, loader, path, cache=True):

        super(InventoryModule, self).parse(inventory, loader, path)

        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        path = os.path.abspath(path)
        cmd = [path, "--list"]
        err = ''  # initialized up front so later error paths can always reference it

        try:
            cache_key = self.get_cache_prefix(path)
            if cache and cache_key in inventory.cache:
                data = inventory.cache[cache_key]
            else:
                try:
                    sp = subprocess.Popen(cmd,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
                except OSError as e:
                    raise AnsibleError("problem running %s (%s)" %
                                       (' '.join(cmd), e))
                (stdout, stderr) = sp.communicate()

                path = to_native(path)
                if stderr:
                    err = to_native(stderr) + "\n"

                if sp.returncode != 0:
                    raise AnsibleError(
                        "Inventory script (%s) had an execution error: %s " %
                        (path, err))

                # make sure script output is unicode so that json loader will output
                # unicode strings itself
                try:
                    data = to_text(stdout, errors="strict")
                except Exception as e:
                    raise AnsibleError(
                        "Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}"
                        .format(path, to_native(e)))

                if cache:
                    inventory.cache[cache_key] = data

            try:
                processed = self.loader.load(data)
            except Exception as e:
                raise AnsibleError(
                    "failed to parse executable inventory script results from {0}: {1}\n{2}"
                    .format(path, to_native(e), err))

            if not isinstance(processed, Mapping):
                raise AnsibleError(
                    "failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}"
                    .format(path, err))

            group = None
            data_from_meta = None
            for (group, gdata) in processed.items():
                if group == '_meta':
                    if 'hostvars' in gdata:
                        data_from_meta = gdata['hostvars']
                else:
                    self._parse_group(group, gdata)

            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host.  This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.
            for host in self._hosts:
                got = {}
                if data_from_meta is None:
                    # no _meta/hostvars block was returned, so fall back to
                    # querying the script once per host
                    got = self.get_host_variables(path, host)
                else:
                    try:
                        got = data_from_meta.get(host, {})
                    except AttributeError as e:
                        raise AnsibleError(
                            "Improperly formatted host information for %s: %s"
                            % (host, to_native(e)))

                self._populate_host_vars([host], got)

        except Exception as e:
            raise AnsibleParserError(e)
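
The data this plugin consumes is whatever JSON the script prints for --list; a minimal hypothetical example, including the optional "_meta" block that suppresses the per-host --host calls:

import json

example_list_output = json.dumps({
    'webservers': {
        'hosts': ['web01', 'web02'],
        'vars': {'http_port': 8080},
    },
    '_meta': {
        # when present, hostvars is read directly instead of invoking
        # the script again with --host <hostname>
        'hostvars': {
            'web01': {'ansible_host': '10.0.0.1'},
        },
    },
})
print(json.loads(example_list_output)['_meta']['hostvars']['web01'])

Example #30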
    def run(self, terms, variables=None, **kwargs):
        ret = []

        for term in terms:
            display.debug("Forklift lookup term: %s" % term)

            lookup_params = dict(x.split('=', 1) for x in term.split())

            try:
                scenario = lookup_params['scenario']
                if scenario != 'foreman':
                    scenario = 'katello'
                scenario_version = lookup_params['scenario_version']
                versions_file_name = lookup_params['file']
                upgrade = lookup_params.get('upgrade', False)
            except KeyError:
                raise AnsibleParserError(
                    "missing required param for forklift_version")

            try:
                with open(versions_file_name, 'r') as versions_file:
                    versions = yaml.safe_load(versions_file)
            except Exception:
                raise AnsibleLookupError("couldn't read '%s'" %
                                         versions_file_name)

            if not upgrade:
                for version in versions['installers']:
                    if version[scenario] == scenario_version:
                        forklift_vars = {
                            'foreman_repositories_version': version['foreman'],
                            'foreman_client_repositories_version': version['foreman'],
                            'katello_repositories_version': version['katello'],
                            'katello_repositories_pulp_version': version['pulp'],
                            'pulp_repositories_version': version['pulp'],
                            'puppet_repositories_version': version['puppet'],
                        }
                        ret.append(forklift_vars)
                        break
            else:
                upgrade_versions = set()
                for version in reversed(versions['installers']):
                    if version[scenario] == scenario_version:
                        upgrade_versions.add(scenario_version)
                    elif 1 <= len(upgrade_versions) < TOTAL_UPGRADE_VERSIONS:
                        upgrade_versions.add(version[scenario])

                if len(upgrade_versions) == 0:
                    raise AnsibleLookupError("could not find %s/%s" %
                                             (scenario, scenario_version))

                upgrade_versions = sorted(upgrade_versions,
                                          key=version_sort_key)
                while len(upgrade_versions) < TOTAL_UPGRADE_VERSIONS:
                    upgrade_versions.insert(0, upgrade_versions[0])
                ret.append(upgrade_versions)

        return ret
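
A quick sketch of how each lookup term is tokenized before the required keys are checked (the values here are made up):

term = 'file=versions.yaml scenario=katello scenario_version=4.5 upgrade=True'
lookup_params = dict(x.split('=', 1) for x in term.split())
print(lookup_params)
# {'file': 'versions.yaml', 'scenario': 'katello',
#  'scenario_version': '4.5', 'upgrade': 'True'}
# note that 'upgrade' arrives as the string 'True', which is truthy, so the
# upgrade branch above runs for any non-empty value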