Example no. 1
0
    def _get_parent_attribute(self, attr, extend=False, reverse=False):
        '''
        Resolve *attr* for this object, optionally combining it with the
        value inherited from the ablefile (or its active stage).
        '''
        value = None
        try:
            value = self._attributes[attr]

            if self.ablefile:
                # Inherit from the active stage when one is set, otherwise
                # from the ablefile itself.
                source = self.ablefile.active_stage
                if source is None:
                    source = self.ablefile
                parent_value = getattr(source, attr)

                if not extend:
                    # Plain lookup: the parent value wins outright.
                    value = parent_value
                elif type(value) == type(parent_value) and isinstance(value, list):
                    first, second = (value, parent_value) if not reverse else (parent_value, value)
                    return self._extend_value(first, second)
                elif type(value) == type(parent_value) and isinstance(value, dict):
                    first, second = (parent_value, value) if not reverse else (value, parent_value)
                    return merge_hash(first, second)
                else:
                    return value if value is not None else parent_value
        except KeyError:
            # Attribute not set on this object at all.
            pass

        return value
    def run(self, tmp=None, task_vars=None):
        """Slurp a file from the remote host and write its (base64) content
        into Vault via the hashivault_write module run on the controller."""

        task_vars = task_vars if task_vars is not None else dict()

        results = super(ActionModule, self).run(tmp, task_vars)

        module_args = self._task.args.copy()
        secret_key = module_args.pop('key', None)
        remote_path = module_args.pop('path', None)

        slurp_args = dict(src=remote_path)
        self._update_module_args('slurp', slurp_args, task_vars)

        # Run slurp on the remote host to fetch the file content.
        slurp_result = self._execute_module(module_name='slurp', tmp=tmp,
                                            task_vars=task_vars,
                                            module_args=slurp_args)
        results = merge_hash(results, slurp_result)

        if results.get('failed') is True:
            return results

        # slurp already returns the content base64-encoded
        content = results.pop('content', None)

        # Switch to a local connection without privilege escalation so the
        # hashivault_write module executes on the controller.
        self._play_context.become = False
        self._play_context.become_method = None
        self._connection = self._shared_loader_obj.connection_loader.get(
            'local', self._play_context, self._connection._new_stdin)

        module_args['data'] = {secret_key: content}
        module_args.setdefault('update', True)

        write_result = self._execute_module(module_name='hashivault_write',
                                            tmp=tmp, task_vars=task_vars,
                                            module_args=module_args)
        results = merge_hash(results, write_result)

        # Never leak the secret payload through the recorded invocation.
        results['invocation']['module_args']['data'] = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'

        return results
Example no. 3
0
    def run(self, tmp=None, task_vars=None):
        """Generic action: execute the task's module on the target host.

        Handles check mode, async wrapping and removal of the temporary
        remote directory.
        """

        # individual modules might disagree but as the generic the action plugin, pass at this point.
        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # BUG FIX: wrap_async was only assigned inside the branch below, so
        # a skipped result made the final cleanup check raise NameError.
        wrap_async = False

        if not result.get('skipped'):

            if result.get('invocation', {}).get('module_args'):
                # avoid passing to modules in case of no_log
                # should not be set anymore but here for backwards compatibility
                del result['invocation']['module_args']

            # FUTURE: better to let _execute_module calculate this internally?
            wrap_async = self._task.async_val and not self._connection.has_native_async

            # do work!
            result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async))

            # hack to keep --verbose from showing all the setup module result
            # moved from setup module as now we filter out all _ansible_ from result
            if self._task.action == 'setup':
                result['_ansible_verbose_override'] = True

        if not wrap_async:
            # remove a temporary path we created
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
Example no. 4
0
def load_host_vars(hostname, inventory='hosts'):
    """Return the variables for *hostname* loaded from *inventory*."""
    inv = get_inventory(inventory)
    host = inv.get_host(hostname)
    if not HAS_ANSIBLE2:
        return host.get_variables()
    # Ansible 2: combine group vars with host vars (host vars second).
    return merge_hash(host.get_group_vars(), host.get_vars())
def list_modules(module_dir, depth=0):
    ''' returns a hash of categories, each category being a hash of module names to file paths '''

    categories = dict(all=dict(), _aliases=dict())
    if depth <= 3:  # limit # of subdirs

        files = glob.glob("%s/*" % module_dir)
        for d in files:

            category = os.path.splitext(os.path.basename(d))[0]
            if os.path.isdir(d):

                res = list_modules(d, depth + 1)
                for key in list(res.keys()):
                    if key in categories:
                        categories[key] = merge_hash(categories[key], res[key])
                        res.pop(key, None)

                if depth < 2:
                    categories.update(res)
                else:
                    category = module_dir.split("/")[-1]
                    if category not in categories:
                        categories[category] = res
                    else:
                        categories[category].update(res)
            else:
                module = category
                category = os.path.basename(module_dir)
                if ("caas_" not in d) or not d.endswith(".py") or d.endswith('__init__.py'):
                    # windows powershell modules have documentation stubs in python docstring
                    # format (they are not executed) so skip the ps1 format files
                    continue
                elif module.startswith("_") and os.path.islink(d):
                    # deprecated alias: record it under the real (target) module name
                    source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0]
                    module = module.replace("_", "", 1)
                    # BUG FIX: membership must be tested with 'source' (the key
                    # actually written below), not the path 'd'; and a list is
                    # grown with append(), not update() (lists have no update).
                    if source not in categories['_aliases']:
                        categories['_aliases'][source] = [module]
                    else:
                        categories['_aliases'][source].append(module)
                    continue

                if category not in categories:
                    categories[category] = {}
                categories[category][module] = d
                categories['all'][module] = d

    # keep module tests out of becoming module docs
    if 'test' in categories:
        del categories['test']

    return categories
Example no. 6
0
    def merge_data(self, dest, extend=False):
        # Merge this object's attribute values into `dest`.
        #
        # For each base attribute that is set on `self` (and present in our
        # data source), the value is copied or combined onto `dest`.
        # `extend` selects the direction of combination for mergeable
        # attributes (those listed in self._combine_keys) and whether a
        # non-mergeable value may overwrite dest's existing one.
        # NOTE(review): assumes merge_hash(a, b) lets b's keys win over a's
        # — confirm against merge_hash's definition.

        # Walk all attributes in the class
        for (name, attribute) in iteritems(self._get_base_attributes()):
            if name in self._excluded_keys:
                continue

            # Skip attributes we do not actually carry: unset, or absent
            # from our own data source.
            value = getattr(self, name)
            if value is None or name not in self.get_ds():
                continue

            # If dest has no value of its own for this attribute, just take
            # ours and move on.
            new_value = getattr(dest, name)
            if new_value is None or name not in dest.get_ds():
                setattr(dest, name, value)
                continue

            # Determine if we can merge
            if name not in self._combine_keys or type(value) != type(new_value):
                # Not mergeable: overwrite dest only in non-extend mode.
                if not extend:
                    setattr(dest, name, value)

            elif isinstance(value, dict):
                # Dicts: argument order decides which side's keys prevail.
                if extend:
                    setattr(dest, name, merge_hash(new_value, value))
                else:
                    setattr(dest, name, merge_hash(value, new_value))

            elif isinstance(value, list):
                # Lists: concatenation order depends on `extend` —
                # dest-first when extending, self-first otherwise.
                if extend:
                    new_value.extend(value)
                    setattr(dest, name, new_value)
                else:
                    value.extend(new_value)
                    setattr(dest, name, value)

            elif not extend:
                setattr(dest, name, value)
Example no. 7
0
    def update_custom_stats(self, which, what, host=None):
        '''Aggregate *what* into an existing custom stat (creating it on
        first sight via set_custom_stats).'''

        key = '_run' if host is None else host

        # First sighting of this stat for this host: just record it.
        existing = self.custom.get(key, {})
        if which not in existing:
            return self.set_custom_stats(which, what, key)

        # Refuse to combine values of mismatching types.
        if not isinstance(what, type(existing[which])):
            return None

        if isinstance(what, MutableMapping):
            self.custom[key][which] = merge_hash(self.custom[key][which], what)
        else:
            # let overloaded + take care of other types
            self.custom[key][which] += what
Example no. 8
0
    def run(self, tmp=None, task_vars=None):
        """Run the command module, honouring async and the 'warn' option."""
        self._supports_async = True
        results = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Command module has a special config option to turn off the
        # command nanny warnings; apply it unless the task set one.
        self._task.args.setdefault('warn', C.COMMAND_WARNINGS)

        wrap_async = self._task.async_val and not self._connection.has_native_async
        module_result = self._execute_module(task_vars=task_vars,
                                             wrap_async=wrap_async)
        results = merge_hash(results, module_result)

        if not wrap_async:
            # the temporary remote path is no longer needed
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return results
Example no. 9
0
    def run(self, tmp=None, task_vars=None):
        """Execute the task's module and scrub executor-internal fields."""
        if task_vars is None:
            task_vars = dict()

        results = super(ActionModule, self).run(tmp, task_vars)
        # remove as modules might hide due to nolog
        # BUG FIX: guard the lookup — a skipped/minimal result may have no
        # 'invocation'/'module_args' and the bare del raised KeyError
        # (same guarded idiom used by the generic action plugin).
        if results.get('invocation', {}).get('module_args'):
            del results['invocation']['module_args']
        results = merge_hash(results, self._execute_module(tmp=tmp, task_vars=task_vars))
        # Remove special fields from the result, which can only be set
        # internally by the executor engine. We do this only here in
        # the 'normal' action, as other action plugins may set this.
        #
        # We don't want modules to determine that running the module fires
        # notify handlers.  That's for the playbook to decide.
        for field in ('_ansible_notify',):
            if field in results:
                results.pop(field)

        return results
Example no. 10
0
    def run(self, tmp=None, task_vars=None):
        """Ensure the insights-core egg is present/updated locally, copy it
        to the target host, then run the 'insights' module against it."""

        # setup module
        results = super(ActionModule, self).run(tmp, task_vars)
        remote_user = task_vars.get(
            'ansible_ssh_user') or self._play_context.remote_user

        # get args (BUG FIX: has_key() is Python-2-only; use 'in' / .get())
        the_args = self._task.args
        bypass_update = the_args.get('update') == '1'
        where_to_get_egg = the_args.get(
            'core_url',
            'https://cert-api.access.redhat.com/r/insights/static/insights-core.egg')
        bypass_gpg = the_args.get('no_gpg') == 'yes'

        # locate the installed egg
        # BUG FIX: get_loader() returns None for a missing package, so
        # reading .archive unconditionally raised AttributeError; .archive
        # also only exists on zip-based loaders.
        import pkgutil
        package = pkgutil.get_loader('insights_core')
        location_to_the_egg = getattr(package, 'archive', None)

        # is the core actually installed?
        current_version = None
        version_endpoint = None
        if package and not bypass_update:

            # check current egg version
            import insights_core
            current_version = insights_core.constants.version

            # curl version endpoint
            # NOTE(review): urllib.urlopen/urlretrieve are the Python-2 API;
            # under Python 3 this needs urllib.request — confirm the
            # supported interpreter before changing.
            import urllib
            version_endpoint_response = urllib.urlopen(
                "https://cert-api.access.redhat.com/r/insights/static/insights-core.version"
            )
            version_endpoint = version_endpoint_response.read()

        # download the egg and install the egg if its out of date
        # BUG FIX: compare versions only when both were fetched; on
        # Python 3, 'None < None' raises TypeError (it was False on Py2).
        if (current_version is not None and version_endpoint is not None
                and current_version < version_endpoint) or (not package
                                                            and not bypass_update):

            # download the egg
            # BUG FIX: urllib was previously only imported in the branch
            # above, so the 'not package' path hit a NameError here.
            import urllib
            import tempfile
            tmp_dir = tempfile.mkdtemp()
            egg_download_response = urllib.urlretrieve(where_to_get_egg,
                                                       tmp_dir)

            # verify the egg
            # gpg --verify $GPG_KEY $EGG_LOCATION > /dev/null 2>&1
            gpg_checks_out = True
            # BUG FIX: this variable was misspelled ('egg_verfication'),
            # which made the check below a guaranteed NameError.
            egg_verification = True if gpg_checks_out or bypass_gpg else False

            # install the egg
            if egg_verification:
                # easy_install??
                pass
            else:
                # do some other stuff
                pass

        # copy our egg
        remote_path = None
        if package and location_to_the_egg:
            tmp = self._make_tmp_path(remote_user)
            source_full = self._loader.get_real_file(location_to_the_egg)
            tmp_src = self._connection._shell.join_path(tmp, 'insights')
            remote_path = self._transfer_file(source_full, tmp_src)
        results = merge_hash(
            results,
            self._execute_module(module_args={"egg_path": remote_path},
                                 module_name="insights",
                                 tmp=tmp,
                                 task_vars=task_vars))
        return results
Example no. 11
0
 def test_merge_hash_non_recursive_and_list_replace(self):
     '''Non-recursive merge with list policy 'replace' yields the high-priority dict.'''
     low_prio = self.merge_hash_data['low_prio']
     high_prio = self.merge_hash_data['high_prio']
     self.assertEqual(merge_hash(low_prio, high_prio, False, 'replace'), high_prio)
Example no. 12
0
 def test_merge_hash(self):
     '''Each data-driven case merges to its recorded result.'''
     for case in self.test_merge_data:
         merged = merge_hash(case['a'], case['b'])
         self.assertEqual(merged, case['result'])
Example no. 13
0
 def test_merge_hash(self):
     '''merge_hash(a, b) matches the expected result for every fixture.'''
     for fixture in self.test_merge_data:
         self.assertEqual(merge_hash(fixture['a'], fixture['b']),
                          fixture['result'])
    def run(self, tmp=None, task_vars=None):
        """Read a secret from Vault on the controller and copy its
        base64-decoded content to a file on the remote host."""

        if task_vars is None:
            task_vars = dict()

        results = super(ActionModule, self).run(tmp, task_vars)

        args = self._task.args.copy()

        dest = args.pop('dest', None)
        mode = args.pop('mode', None)
        force = args.pop('force', True)
        become = self._play_context.become
        become_method = self._play_context.become_method

        # hashivault_read must run locally, without privilege escalation
        old_connection = self._connection
        self._connection = self._shared_loader_obj.connection_loader.get(
            'local', PlayContext(), old_connection._new_stdin)
        self._play_context.become = False
        self._play_context.become_method = None

        results = merge_hash(
            results,
            # executes hashivault_read module on localhost
            self._execute_module(module_name='hashivault_read',
                                 tmp=tmp,
                                 task_vars=task_vars,
                                 module_args=args))

        # idiom fix: avoid '== True' / '== None' comparisons
        if results.get('failed'):
            return results

        content = results.pop('value', None)

        if content is None:
            results['failed'] = True
            results['msg'] = 'Could not find file `%s` in secret `%s`' % (
                args['key'], args['secret'])
            return results

        # write to temp file on ansible host to copy to remote host
        local_tmp = tempfile.NamedTemporaryFile(delete=False)
        local_tmp.write(base64.b64decode(content))
        local_tmp.close()

        # BUG FIX: make sure the controller-side temp file is removed even
        # if the copy plugin raises (it previously leaked on failure).
        try:
            new_module_args = {
                'dest': dest,
                'src': local_tmp.name,
                'force': force,
                'mode': mode
            }

            self._update_module_args('copy', new_module_args, task_vars)

            # `copy` module uses an action plugin, so we have to execute
            # the plugin instead of directly executing the module
            copy_action = self._get_copy_action_plugin(old_connection)
            copy_action._task.args = new_module_args
            copy_action._play_context.become = become
            copy_action._play_context.become_method = become_method

            results = merge_hash(
                results,
                # executes copy action plugin/module on remote host
                copy_action.run(task_vars=task_vars))
        finally:
            # remove temp file
            os.unlink(local_tmp.name)

        if not force and not results['changed']:
            results['failed'] = True
            results[
                'msg'] = 'File %s already exists. Use `force: true` to overwrite' % dest

        return results
Example no. 15
0
 def test_merge_hash(self):
     """All prepared merge fixtures produce their expected result."""
     for entry in self.test_merge_data:
         actual = merge_hash(entry["a"], entry["b"])
         self.assertEqual(actual, entry["result"])
Example no. 16
0
    def run(self, tmp=None, task_vars=None):
        """Run iptables_state; for state=restored, wrap the module in an
        async job so a bad ruleset can roll back, then confirm it."""

        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # BUG FIX: initialize here so the 'if not wrap_async' cleanup at the
        # end of the method cannot raise NameError when the task result is
        # 'skipped' (the branch below never runs in that case).
        wrap_async = False

        if not result.get('skipped'):

            # FUTURE: better to let _execute_module calculate this internally?
            wrap_async = self._task.async_val and not self._connection.has_native_async

            # Set short names for values we'll have to compare or reuse
            task_poll = self._task.poll
            task_async = self._task.async_val
            check_mode = self._play_context.check_mode
            max_timeout = self._connection._play_context.timeout
            module_args = self._task.args

            if module_args.get('state', None) == 'restored':
                if not wrap_async:
                    if not check_mode:
                        display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
                            task_poll,
                            task_async,
                            max_timeout))
                elif task_poll:
                    raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
                        task_poll,
                        task_async,
                        max_timeout))
                else:
                    if task_async > max_timeout and not check_mode:
                        display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
                            task_poll,
                            task_async,
                            max_timeout))

                    # inject the async directory based on the shell option into the
                    # module args
                    async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")

                    # Bind the loop max duration to consistent values on both
                    # remote and local sides (if not the same, make the loop
                    # longer on the controller); and set a backup file path.
                    module_args['_timeout'] = task_async
                    module_args['_back'] = '%s/iptables.state' % async_dir
                    async_status_args = dict(mode='status')
                    confirm_cmd = 'rm -f %s' % module_args['_back']
                    starter_cmd = 'touch %s.starter' % module_args['_back']
                    remaining_time = max(task_async, max_timeout)

            # do work!
            result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))

            # Then the 3-steps "go ahead or rollback":
            # 1. Catch early errors of the module (in asynchronous task) if any.
            #    Touch a file on the target to signal the module to process now.
            # 2. Reset connection to ensure a persistent one will not be reused.
            # 3. Confirm the restored state by removing the backup on the remote.
            #    Retrieve the results of the asynchronous task to return them.
            if '_back' in module_args:
                async_status_args['jid'] = result.get('ansible_job_id', None)
                if async_status_args['jid'] is None:
                    raise AnsibleActionFail("Unable to get 'ansible_job_id'.")

                # Catch early errors due to missing mandatory option, bad
                # option type/value, missing required system command, etc.
                result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))

                # The module is aware to not process the main iptables-restore
                # command before finding (and deleting) the 'starter' cookie on
                # the host, so the previous query will not reach ssh timeout.
                dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)

                # As the main command is not yet executed on the target, here
                # 'finished' means 'failed before main command be executed'.
                if not result['finished']:
                    try:
                        self._connection.reset()
                    except AttributeError:
                        pass

                    for dummy in range(max_timeout):
                        time.sleep(1)
                        remaining_time -= 1
                        # - AnsibleConnectionFailure covers rejected requests (i.e.
                        #   by rules with '--jump REJECT')
                        # - ansible_timeout is able to cover dropped requests (due
                        #   to a rule or policy DROP) if not lower than async_val.
                        try:
                            dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
                            break
                        except AnsibleConnectionFailure:
                            continue

                    result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))

                # Cleanup async related stuff and internal params
                for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
                    if result.get(key):
                        del result[key]

                if result.get('invocation', {}).get('module_args'):
                    for key in ('_back', '_timeout', '_async_dir', 'jid'):
                        if result['invocation']['module_args'].get(key):
                            del result['invocation']['module_args'][key]

                async_status_args['mode'] = 'cleanup'
                dummy = self._async_result(async_status_args, task_vars, 0)

        if not wrap_async:
            # remove a temporary path we created
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
Example no. 17
0
    def run(self, tmp=None, task_vars=None):
        """Run iptables_state (community flavor); for state=restored, wrap
        the module in an async job so a bad ruleset can roll back, then
        confirm the restored state."""

        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # BUG FIX: initialize here so the 'if not wrap_async' cleanup at the
        # end of the method cannot raise NameError when the task result is
        # 'skipped' (the branch below never runs in that case).
        wrap_async = False

        if not result.get('skipped'):

            # FUTURE: better to let _execute_module calculate this internally?
            wrap_async = self._task.async_val and not self._connection.has_native_async

            # Set short names for values we'll have to compare or reuse
            task_poll = self._task.poll
            task_async = self._task.async_val
            check_mode = self._play_context.check_mode
            max_timeout = self._connection._play_context.timeout
            module_name = self._task.action
            module_args = self._task.args

            if module_args.get('state', None) == 'restored':
                if not wrap_async:
                    if not check_mode:
                        display.warning(
                            self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK %
                            (task_poll, task_async, max_timeout))
                elif task_poll:
                    raise AnsibleActionFail(
                        self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO %
                        (task_poll, task_async, max_timeout))
                else:
                    if task_async > max_timeout and not check_mode:
                        display.warning(
                            self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT %
                            (task_poll, task_async, max_timeout))

                    # BEGIN snippet from async_status action plugin
                    env_async_dir = [
                        e for e in self._task.environment
                        if "ANSIBLE_ASYNC_DIR" in e
                    ]
                    if len(env_async_dir) > 0:
                        # for backwards compatibility we need to get the dir from
                        # ANSIBLE_ASYNC_DIR that is defined in the environment. This is
                        # deprecated and will be removed in favour of shell options
                        async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']

                        msg = "Setting the async dir from the environment keyword " \
                              "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
                              "shell option instead"
                        display.deprecated(msg,
                                           version='2.0.0',
                                           collection_name='community.general'
                                           )  # was Ansible 2.12
                    else:
                        # inject the async directory based on the shell option into the
                        # module args
                        async_dir = self.get_shell_option(
                            'async_dir', default="~/.ansible_async")
                    # END snippet from async_status action plugin

                    # Bind the loop max duration to consistent values on both
                    # remote and local sides (if not the same, make the loop
                    # longer on the controller); and set a backup file path.
                    module_args['_timeout'] = task_async
                    module_args['_back'] = '%s/iptables.state' % async_dir
                    async_status_args = dict(_async_dir=async_dir)
                    confirm_cmd = 'rm -f %s' % module_args['_back']
                    remaining_time = max(task_async, max_timeout)

            # do work!
            result = merge_hash(
                result,
                self._execute_module(module_args=module_args,
                                     task_vars=task_vars,
                                     wrap_async=wrap_async))

            # Then the 3-steps "go ahead or rollback":
            # - reset connection to ensure a persistent one will not be reused
            # - confirm the restored state by removing the backup on the remote
            # - retrieve the results of the asynchronous task to return them
            if '_back' in module_args:
                async_status_args['jid'] = result.get('ansible_job_id', None)
                if async_status_args['jid'] is None:
                    raise AnsibleActionFail("Unable to get 'ansible_job_id'.")

                # Catch early errors due to missing mandatory option, bad
                # option type/value, missing required system command, etc.
                result = merge_hash(
                    result, self._async_result(async_status_args, task_vars,
                                               0))

                if not result['finished']:
                    try:
                        self._connection.reset()
                        display.v("%s: reset connection" % (module_name))
                    except AttributeError:
                        display.warning(
                            "Connection plugin does not allow to reset the connection."
                        )

                    for x in range(max_timeout):
                        time.sleep(1)
                        remaining_time -= 1
                        # - AnsibleConnectionFailure covers rejected requests (i.e.
                        #   by rules with '--jump REJECT')
                        # - ansible_timeout is able to cover dropped requests (due
                        #   to a rule or policy DROP) if not lower than async_val.
                        try:
                            garbage = self._low_level_execute_command(
                                confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
                            break
                        except AnsibleConnectionFailure:
                            continue

                    result = merge_hash(
                        result,
                        self._async_result(async_status_args, task_vars,
                                           remaining_time))

                # Cleanup async related stuff and internal params
                for key in ('ansible_job_id', 'results_file', 'started',
                            'finished'):
                    if result.get(key):
                        del result[key]

                if result.get('invocation', {}).get('module_args'):
                    if '_timeout' in result['invocation']['module_args']:
                        del result['invocation']['module_args']['_back']
                        del result['invocation']['module_args']['_timeout']

                async_status_args['mode'] = 'cleanup'
                garbage = self._execute_module(module_name='async_status',
                                               module_args=async_status_args,
                                               task_vars=task_vars,
                                               wrap_async=False)

        if not wrap_async:
            # remove a temporary path we created
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
    def run(self, tmp=None, task_vars=None):

        result = super(ActionModule, self).run(tmp, task_vars)

        args = dict()
        args = self._task.args.copy()
        args = merge_hash(args.pop('args', {}), args)
        object_type = args.pop('type', None)

        if object_type not in task_vars['icinga2_object_types']:
            raise AnsibleError('unknown Icinga object type: %s' % object_type)

        #
        # distribute to object type as module (name: icinga2_type)
        #
        obj = dict()
        obj = self._execute_module(module_name='icinga2_' +
                                   object_type.lower(),
                                   module_args=args,
                                   task_vars=task_vars,
                                   tmp=tmp)

        if 'failed' in obj:
            raise AnsibleError('Call to module failed: %s' % obj['msg'])
        elif 'skipped' in obj and obj['skipped']:
            raise AnsibleError('Call to module was skipped: %s' % obj['msg'])

        #
        # file path handling for assemble
        #
        path = task_vars['icinga2_fragments_path'] + '/' + obj['file'] + '/'
        file_fragment = path + obj['order'] + '_' + object_type.lower(
        ) + '-' + obj['name']

        file_args = dict()
        file_args['state'] = 'directory'
        file_args['path'] = path
        file_module = self._execute_module(module_name='file',
                                           module_args=file_args,
                                           task_vars=task_vars,
                                           tmp=tmp)
        result = merge_hash(result, file_module)

        if obj['state'] != 'absent':
            varlist = list()  # list of variables from 'apply for'

            #
            # quoting of object name?
            #
            if obj['name'] not in task_vars['icinga2_combined_constants']:
                object_name = '"' + obj['name'] + '"'
            else:
                object_name = obj['name']

            #
            # apply rule?
            #
            if 'apply' in obj and obj['apply'] and not obj['args']['assign']:
                raise AnsibleError(
                    'Apply rule %s is missing the assign rule.' % obj['name'])
            if 'apply' in obj and obj['apply']:
                object_content = 'apply ' + object_type
                if 'apply_target' in obj and obj['apply_target']:
                    object_content += ' ' + object_name + ' to ' + obj[
                        'apply_target']
                elif 'apply_for' in obj and obj['apply_for']:
                    object_content += ' for (' + obj['apply_for'] + ') '
                    r = re.search(r'^(.+)\s+in\s+', obj['apply_for'])
                    if r:
                        tmp = r.group(1).strip()
                        r = re.search(r'^(.+)=>(.+)$', tmp)
                        if r:
                            varlist.extend(
                                [r.group(1).strip(),
                                 r.group(2).strip()])
                        else:
                            varlist.append(tmp)
                else:
                    object_content += ' ' + object_name
            #
            # template?
            #
            elif 'template' in obj and obj['template']:
                object_content = 'template ' + object_type + ' ' + object_name
            #
            # object
            #
            else:
                object_content = 'object ' + object_type + ' ' + object_name
            object_content += ' {\n'

            #
            # imports?
            #
            if 'imports' in obj:
                for item in obj['imports']:
                    object_content += '  import "' + str(item) + '"\n'
                object_content += '\n'

            #
            # parser
            #
            object_content += Icinga2Parser().parse(
                obj['args'],
                list(task_vars['icinga2_combined_constants'].keys()) +
                task_vars['icinga2_reserved'] + varlist +
                list(obj['args'].keys()), 2) + '}\n'
            copy_action = self._task.copy()
            copy_action.args = dict()
            copy_action.args['dest'] = file_fragment
            copy_action.args['content'] = object_content

            copy_action = self._shared_loader_obj.action_loader.get(
                'copy',
                task=copy_action,
                connection=self._connection,
                play_context=self._play_context,
                loader=self._loader,
                templar=self._templar,
                shared_loader_obj=self._shared_loader_obj)

            result = merge_hash(result, copy_action.run(task_vars=task_vars))
        else:
            # remove file if does not belong to a feature
            if 'features-available' not in path:
                file_args = dict()
                file_args['state'] = 'absent'
                file_args['path'] = file_fragment
                file_module = self._execute_module(module_name='file',
                                                   module_args=file_args,
                                                   task_vars=task_vars,
                                                   tmp=tmp)
                result = merge_hash(result, file_module)
            result['dest'] = file_fragment

        return result
    def run(self, tmp=None, task_vars=None):
        """Fetch a secret from Hashicorp Vault and copy it to the remote host.

        Executes the ``hashivault_read`` module on the controller (forcing a
        local connection with privilege escalation disabled), optionally
        base64-decodes the returned value, writes it to a temporary file on
        the controller, and delivers it to the remote host through the
        ``copy`` action plugin (with the original become settings restored).

        Task args consumed here (stripped before delegating the remainder to
        ``hashivault_read``):
            dest   -- destination path on the remote host
            force  -- overwrite an existing file (default True)
            base64 -- treat the secret value as base64-encoded data
                      (default True)
        Any standard ``copy`` module attribute (mode, owner, group, ...) is
        forwarded to the copy step.

        :returns: merged Ansible result dict; ``failed``/``rc``/``msg`` are
                  set on vault-read failure, decode failure, or when
                  ``force: false`` and the destination already exists.
        """
        if task_vars is None:
            task_vars = dict()

        results = super(ActionModule, self).run(tmp, task_vars)

        args = self._task.args.copy()

        dest = args.pop('dest', None)
        force = args.pop('force', True)
        base64 = args.pop('base64', True)

        new_module_args = {
            'dest': dest,
            'force': force,
        }
        # Forward any user-supplied copy-module options to the copy step.
        copy_attributes = [
            'attributes',
            'backup',
            'checksum',
            'delimiter',
            'directory_mode',
            'follow',
            'group',
            'local_follow',
            'mode',
            'owner',
            'regexp',
            'selevel',
            'serole',
            'setype',
            'seuser',
            'unsafe_writes',
            'validate',
        ]
        for attribute in copy_attributes:
            value = args.pop(attribute, None)
            if value is not None:
                new_module_args[attribute] = value

        def _fail(msg_format, ex):
            # Common error-return shaping for content-preparation failures.
            results['failed'] = True
            results['rc'] = 1
            secret_key = str(args.get('secret', 'secret')) + "/" + str(args.get('key', ''))
            results['msg'] = msg_format % (secret_key, str(ex))
            return results

        # Remember privilege-escalation settings so the final copy step can
        # use them while the local vault read runs unprivileged.
        become = self._play_context.become
        become_method = self._play_context.become_method

        old_connection = self._connection
        self._connection = self._shared_loader_obj.connection_loader.get('local', PlayContext(),
                                                                         old_connection._new_stdin)
        self._play_context.become = False
        self._play_context.become_method = None

        results = merge_hash(
            results,
            # executes hashivault_read module on localhost
            self._execute_module(module_name='hashivault_read', tmp=tmp, task_vars=task_vars, module_args=args)
        )

        if 'failed' in results and results['failed'] is True:
            return results

        content = results.pop('value', None)

        if content is None:
            results['failed'] = True
            results['rc'] = 1
            results['msg'] = u'Could not find file `%s` in secret `%s`' % (args.get('key'), args.get('secret'))
            return results

        if base64:
            try:
                content = base64encode.b64decode(content)
            except Exception as ex:
                return _fail(u'Error base64 decoding secret %s: %s', ex)
        else:
            try:
                import sys
                if sys.version_info[0] > 2:
                    content = bytes(content, 'utf-8')
                else:
                    content = bytes(content)
            except Exception as ex:
                return _fail(u'Error preparing utf-8 secret %s: %s', ex)

        # Create the controller-side temp file only after the content was
        # successfully prepared, so the error returns above leak no file.
        local_tmp = tempfile.NamedTemporaryFile(delete=False)
        try:
            local_tmp.write(content)
        finally:
            local_tmp.close()
        new_module_args['src'] = local_tmp.name

        self._update_module_args('copy', new_module_args, task_vars)

        # `copy` module uses an action plugin, so we have to execute
        # the plugin instead of directly executing the module
        copy_action = self._get_copy_action_plugin(old_connection)
        copy_action._task.args = new_module_args
        copy_action._play_context.become = become
        copy_action._play_context.become_method = become_method

        results = merge_hash(
            results,
            # executes copy action plugin/module on remote host
            copy_action.run(task_vars=task_vars)
        )

        # remove temp file
        os.unlink(local_tmp.name)

        # .get() guards against a copy result that carries no 'changed' key
        # (e.g. when the copy step itself failed).
        if force is False and results.get('changed') is False:
            results['failed'] = True
            results['rc'] = 1
            results['msg'] = u'File %s already exists. Use `force: true` to overwrite' % dest

        return results