Example #1
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        facts = dict()

        cacheable = boolean(self._task.args.pop('cacheable', False))

        if self._task.args:
            for (k, v) in iteritems(self._task.args):
                k = self._templar.template(k)

                if not isidentifier(k):
                    result['failed'] = True
                    result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                     "letters, numbers and underscores." % k)
                    return result

                if isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
                    v = boolean(v, strict=False)
                facts[k] = v

        result['changed'] = False
        result['ansible_facts'] = facts
        result['_ansible_facts_cacheable'] = cacheable
        return result
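
For context, the boolean(v, strict=False) call above comes from ansible.module_utils.parsing.convert_bool. A minimal sketch of its behaviour, assuming ansible is installed so the import resolves:

    from ansible.module_utils.parsing.convert_bool import boolean

    print(boolean('yes', strict=False))    # True
    print(boolean('false', strict=False))  # False
    print(boolean('maybe', strict=False))  # False: non-strict falls back to False
    # boolean('maybe', strict=True) would raise TypeError instead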
Example #2
    def set_options(self, options):
        '''
        Configures this connection information instance with data from
        options specified by the user on the command line. These have a
        lower precedence than those set on the play or host.
        '''

        # privilege escalation
        self.become = options.become
        self.become_method = options.become_method
        self.become_user = options.become_user

        self.check_mode = boolean(options.check, strict=False)
        self.diff = boolean(options.diff, strict=False)

        #  general flags (should we move out?)
        #  should only be 'non plugin' flags
        for flag in OPTION_FLAGS:
            attribute = getattr(options, flag, False)
            if attribute:
                setattr(self, flag, attribute)

        if hasattr(options, 'timeout') and options.timeout:
            self.timeout = int(options.timeout)

        # get the tag info from options. We check to see if the options have
        # the attribute, as it is not always added via the CLI
        if hasattr(options, 'tags'):
            self.only_tags.update(options.tags)

        if len(self.only_tags) == 0:
            self.only_tags = set(['all'])

        if hasattr(options, 'skip_tags'):
            self.skip_tags.update(options.skip_tags)
Example #3
 def do_check(self, arg):
     """Toggle whether plays run with check mode"""
     if arg:
         self.options.check = boolean(arg, strict=False)
         display.v("check mode changed to %s" % self.options.check)
     else:
         display.display("Please specify check mode value, e.g. `check yes`")
Example #4
 def do_diff(self, arg):
     """Toggle whether plays run with diff"""
     if arg:
         self.options.diff = boolean(arg, strict=False)
         display.v("diff mode changed to %s" % self.options.diff)
     else:
         display.display("Please specify a diff value, e.g. `diff yes`")
Example #5
    def boolean(self, value, true='yes', false='no'):
        ''' Return an acceptable value back '''

        # When the expected value is of type=bool
        if value is None:
            return None
        elif value is True:
            return true
        elif value is False:
            return false

        # When the expected value is of type=raw; deprecate in Ansible v2.8 (once all modules use type=bool)
        try:
            # This supports all Ansible boolean types
            bool_value = boolean(value)
            if bool_value is True:
                return true
            elif bool_value is False:
                return false
        except Exception:
            # This provides backward compatibility to Ansible v2.4, deprecate in Ansible v2.8
            if value == true:
                self.module.deprecate("Boolean value '%s' is no longer valid, please use 'yes' as a boolean value." % value, '2.9')
                return true
            elif value == false:
                self.module.deprecate("Boolean value '%s' is no longer valid, please use 'no' as a boolean value." % value, '2.9')
                return false

        # If all else fails, escalate back to user
        self.module.fail_json(msg="Boolean value '%s' is an invalid ACI boolean value." % value)
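
The helper maps Python booleans onto the 'yes'/'no' strings the ACI API expects. A rough standalone equivalent of its happy path (aci_boolean is a hypothetical name; the deprecation and fail_json branches are omitted):

    from ansible.module_utils.parsing.convert_bool import boolean

    def aci_boolean(value, true='yes', false='no'):
        # None passes through untouched; everything else is coerced via boolean()
        if value is None:
            return None
        return true if boolean(value) else false

    assert aci_boolean(True) == 'yes'
    assert aci_boolean('no') == 'no'
    assert aci_boolean(None) is None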
Example #6
    def run(self, terms, variables, **kwargs):

        anydict = False
        skip = False

        for term in terms:
            if isinstance(term, dict):
                anydict = True

        total_search = []
        if anydict:
            for term in terms:
                if isinstance(term, dict):
                    files = term.get('files', [])
                    paths = term.get('paths', [])
                    skip = boolean(term.get('skip', False), strict=False)

                    filelist = files
                    if isinstance(files, string_types):
                        files = files.replace(',', ' ')
                        files = files.replace(';', ' ')
                        filelist = files.split(' ')

                    pathlist = paths
                    if paths:
                        if isinstance(paths, string_types):
                            paths = paths.replace(',', ' ')
                            paths = paths.replace(':', ' ')
                            paths = paths.replace(';', ' ')
                            pathlist = paths.split(' ')

                    if not pathlist:
                        total_search = filelist
                    else:
                        for path in pathlist:
                            for fn in filelist:
                                f = os.path.join(path, fn)
                                total_search.append(f)
                else:
                    total_search.append(term)
        else:
            total_search = self._flatten(terms)

        for fn in total_search:
            try:
                fn = self._templar.template(fn)
            except (AnsibleUndefinedVariable, UndefinedError):
                continue

            # get subdir if set by task executor, default to files otherwise
            subdir = getattr(self, '_subdir', 'files')
            path = None
            path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
            if path is not None:
                return [path]
        if skip:
            return []
        raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
                                 "files are found")
Example #7
 def do_become(self, arg):
     """Toggle whether plays run with become"""
     if arg:
         self.options.become = boolean(arg, strict=False)
         display.v("become changed to %s" % self.options.become)
         self.set_prompt()
     else:
         display.display("Please specify become value, e.g. `become yes`")
Example #8
 def matchtask(self, file, task):
     if task["action"]["__ansible_module__"] in self._commands and \
             task["action"]["__ansible_arguments__"]:
         executable = os.path.basename(task["action"]["__ansible_arguments__"][0])
         if executable in self._modules and \
                 boolean(task['action'].get('warn', True)):
             message = "{0} used in place of {1} module"
             return message.format(executable, self._modules[executable])
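
A minimal sketch of the task structure this rule inspects, assuming _commands contains 'command' and _modules maps 'systemctl' to 'systemd' (both assumptions about the rule's configuration):

    task = {
        'action': {
            '__ansible_module__': 'command',
            '__ansible_arguments__': ['systemctl', 'restart', 'httpd'],
            'warn': True,
        }
    }
    # matchtask(file, task) would then return:
    # "systemctl used in place of systemd module"

Since boolean(task['action'].get('warn', True)) is truthy by default, setting warn: false on the task is what suppresses the hint.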
Example #9
 def boolean_or_cacert(self, validate_certs, cacert):
     ''' return a bool or cacert '''
     validate_certs = boolean(validate_certs, strict=False)
     if validate_certs is True:
         if cacert != '':
             return cacert
         else:
             return True
     else:
         return False
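
A quick illustration of the three outcomes, assuming conn is an instance of the class this method belongs to:

    assert conn.boolean_or_cacert('yes', '/etc/pki/ca.pem') == '/etc/pki/ca.pem'
    assert conn.boolean_or_cacert('yes', '') is True
    assert conn.boolean_or_cacert('no', '/etc/pki/ca.pem') is False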
Example #10
    def matchtask(self, file, task):
        if task["action"]["__ansible_module__"] in self._commands:
            first_cmd_arg = get_first_cmd_arg(task)
            if not first_cmd_arg:
                return

            executable = os.path.basename(first_cmd_arg)
            if executable in self._arguments and \
                    boolean(task['action'].get('warn', True)):
                message = "{0} used in place of argument {1} to file module"
                return message.format(executable, self._arguments[executable])
Example #11
    def _kerb_auth(self, principal, password):
        if password is None:
            password = ""

        self._kerb_ccache = tempfile.NamedTemporaryFile()
        display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
        krb5ccname = "FILE:%s" % self._kerb_ccache.name
        os.environ["KRB5CCNAME"] = krb5ccname
        krb5env = dict(KRB5CCNAME=krb5ccname)

        # stores various flags to call with kinit; we currently only use this
        # to set -f so we can get a forwardable ticket (credential delegation)
        kinit_flags = []
        if boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
            kinit_flags.append('-f')

        kinit_cmdline = [self._kinit_cmd]
        kinit_cmdline.extend(kinit_flags)
        kinit_cmdline.append(principal)

        # pexpect runs the process in its own pty so it can correctly send
        # the password as input even on macOS, which blocks subprocess from
        # doing so. Unfortunately pexpect is not part of the Python standard
        # library, so we can only use it if someone has installed it
        if HAS_PEXPECT:
            kinit_cmdline = " ".join(kinit_cmdline)
            password = to_text(password, encoding='utf-8',
                               errors='surrogate_or_strict')

            display.vvvv("calling kinit with pexpect for principal %s"
                         % principal)
            events = {
                ".*:": password + "\n"
            }
            # technically this is the stdout but to match subprocess we will
            # call it stderr
            stderr, rc = pexpect.run(kinit_cmdline, withexitstatus=True, events=events, env=krb5env, timeout=60)
        else:
            password = to_bytes(password, encoding='utf-8',
                                errors='surrogate_or_strict')

            display.vvvv("calling kinit with subprocess for principal %s"
                         % principal)
            p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=krb5env)
            stdout, stderr = p.communicate(password + b'\n')
            rc = p.returncode

        if rc != 0:
            raise AnsibleConnectionFailure("Kerberos auth failure: %s" % to_native(stderr.strip()))

        display.vvvvv("kinit succeeded for principal %s" % principal)
Example #12
    def matchtask(self, file, task):
        if task['action']['__ansible_module__'] not in self._commands:
            return

        first_cmd_arg = get_first_cmd_arg(task)
        if not first_cmd_arg:
            return

        executable = os.path.basename(first_cmd_arg)
        if executable in self._modules and \
                boolean(task['action'].get('warn', True)):
            message = '{0} used in place of {1} module'
            return message.format(executable, self._modules[executable])
Example #13
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        stats = {'data': {}, 'per_host': False, 'aggregate': True}

        if self._task.args:
            data = self._task.args.get('data', {})

            if not isinstance(data, dict):
                data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)

            if not isinstance(data, dict):
                result['failed'] = True
                result['msg'] = "The 'data' option needs to be a dictionary/hash"
                return result

            # set boolean options, defaults are set above in stats init
            for opt in ['per_host', 'aggregate']:
                val = self._task.args.get(opt, None)
                if val is not None:
                    if not isinstance(val, bool):
                        stats[opt] = boolean(self._templar.template(val), strict=False)
                    else:
                        stats[opt] = val

            for (k, v) in iteritems(data):

                k = self._templar.template(k)

                if not isidentifier(k):
                    result['failed'] = True
                    result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                     "letters, numbers and underscores." % k)
                    return result

                stats['data'][k] = self._templar.template(v)

        result['changed'] = False
        result['ansible_stats'] = stats

        return result
Example #14
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        src = self._task.args.get('src', None)
        remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)

        if src is None:
            result['failed'] = True
            result['msg'] = "src is required"
            return result
        elif remote_src:
            # everything is remote, so we just execute the module
            # without changing any of the module arguments
            result.update(self._execute_module(task_vars=task_vars))
            return result

        try:
            src = self._find_needle('files', src)
        except AnsibleError as e:
            result['failed'] = True
            result['msg'] = to_native(e)
            return result

        # create the remote tmp dir if needed, and put the source file there
        if tmp is None or "-tmp-" not in tmp:
            tmp = self._make_tmp_path()

        tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
        self._transfer_file(src, tmp_src)

        self._fixup_perms2((tmp, tmp_src))

        new_module_args = self._task.args.copy()
        new_module_args.update(
            dict(
                src=tmp_src,
            )
        )

        result.update(self._execute_module('patch', module_args=new_module_args, task_vars=task_vars))
        self._remove_tmp_path(tmp)
        return result
Example #15
    def _add_host_to_composed_groups(self, groups, variables, host, strict=False):
        ''' helper to create complex groups for plugins based on Jinja2 conditionals; hosts that meet the conditional are added to the group '''
        # process each 'group entry'
        if groups and isinstance(groups, dict):
            self.templar.set_available_variables(variables)
            for group_name in groups:
                conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
                try:
                    result = boolean(self.templar.template(conditional))
                except Exception as e:
                    if strict:
                        raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
                    continue

                if result:
                    # ensure group exists
                    self.inventory.add_group(group_name)
                    # add host to group
                    self.inventory.add_child(group_name, host)
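
To make the templated conditional concrete: for a groups mapping like the one below, each value is wrapped in an if/else template and the host is added when it renders true (hypothetical group data, mirroring the string built above):

    groups = {'webservers': "inventory_hostname.startswith('web')"}
    conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups['webservers']
    # {% if inventory_hostname.startswith('web') %} True {% else %} False {% endif %}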
Example #16
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        src = self._task.args.get('src', None)
        remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)

        try:
            if src is None:
                raise AnsibleActionFail("src is required")
            elif remote_src:
                # everything is remote, so we just execute the module
                # without changing any of the module arguments
                raise _AnsibleActionDone(result=self._execute_module(task_vars=task_vars))

            try:
                src = self._find_needle('files', src)
            except AnsibleError as e:
                raise AnsibleActionFail(to_native(e))

            tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, os.path.basename(src))
            self._transfer_file(src, tmp_src)
            self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))

            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=tmp_src,
                )
            )

            result.update(self._execute_module('patch', module_args=new_module_args, task_vars=task_vars))
        except AnsibleAction as e:
            result.update(e.result)
        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)
        return result
Example #17
    def _kerb_auth(self, principal, password):
        if password is None:
            password = ""

        self._kerb_ccache = tempfile.NamedTemporaryFile()
        display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
        krb5ccname = "FILE:%s" % self._kerb_ccache.name
        os.environ["KRB5CCNAME"] = krb5ccname
        krb5env = dict(KRB5CCNAME=krb5ccname)

        # stores various flags to call with kinit; we currently only use this
        # to set -f so we can get a forwardable ticket (credential delegation)
        kinit_flags = []
        if boolean(
                self.get_option('_extras').get(
                    'ansible_winrm_kerberos_delegation', False)):
            kinit_flags.append('-f')

        kinit_cmdline = [self._kinit_cmd]
        kinit_cmdline.extend(kinit_flags)
        kinit_cmdline.append(principal)

        # pexpect runs the process in its own pty so it can correctly send
        # the password as input even on macOS, which blocks subprocess from
        # doing so. Unfortunately pexpect is not part of the Python standard
        # library, so we can only use it if someone has installed it
        if HAS_PEXPECT:
            proc_mechanism = "pexpect"
            command = kinit_cmdline.pop(0)
            password = to_text(password,
                               encoding='utf-8',
                               errors='surrogate_or_strict')

            display.vvvv("calling kinit with pexpect for principal %s" %
                         principal)
            try:
                child = pexpect.spawn(command,
                                      kinit_cmdline,
                                      timeout=60,
                                      env=krb5env,
                                      echo=False)
            except pexpect.ExceptionPexpect as err:
                err_msg = "Kerberos auth failure when calling kinit cmd " \
                          "'%s': %s" % (command, to_native(err))
                raise AnsibleConnectionFailure(err_msg)

            try:
                child.expect(".*:")
                child.sendline(password)
            except OSError as err:
                # child exited before the password was sent, Ansible will raise
                # error based on the rc below, just display the error here
                display.vvvv("kinit with pexpect raised OSError: %s" %
                             to_native(err))

            # technically this is the stdout + stderr but to match the
            # subprocess error checking behaviour, we will call it stderr
            stderr = child.read()
            child.wait()
            rc = child.exitstatus
        else:
            proc_mechanism = "subprocess"
            password = to_bytes(password,
                                encoding='utf-8',
                                errors='surrogate_or_strict')

            display.vvvv("calling kinit with subprocess for principal %s" %
                         principal)
            try:
                p = subprocess.Popen(kinit_cmdline,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     env=krb5env)

            except OSError as err:
                err_msg = "Kerberos auth failure when calling kinit cmd " \
                          "'%s': %s" % (self._kinit_cmd, to_native(err))
                raise AnsibleConnectionFailure(err_msg)

            stdout, stderr = p.communicate(password + b'\n')
            rc = p.returncode

        if rc != 0:
            # one last attempt at making sure the password does not exist
            # in the output
            exp_msg = to_native(stderr.strip())
            exp_msg = exp_msg.replace(to_native(password), "<redacted>")

            err_msg = "Kerberos auth failure for principal %s with %s: %s" \
                      % (principal, proc_mechanism, exp_msg)
            raise AnsibleConnectionFailure(err_msg)

        display.vvvvv("kinit succeeded for principal %s" % principal)
Example #18
    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables.  Run basic types (from isa) as well as
        any _post_validate_<foo> functions.
        '''

        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')

        for (name, attribute) in iteritems(self._valid_attrs):

            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError(
                        "the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in (
                    'Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue

            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                elif attribute.isa == 'class':
                    value = getattr(self, name)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))

                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    if callable(attribute.default):
                        setattr(self, name, attribute.default())
                    else:
                        setattr(self, name, attribute.default)
                    continue

                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = to_text(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value, strict=False)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa == 'list':
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            value = [value]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError(
                                        "the field '%s' should be a list of %s, "
                                        "but the item '%s' is a %s" %
                                        (name, attribute.listof, item,
                                         type(item)),
                                        obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError(
                                            "the field '%s' is required, and cannot have empty values"
                                            % (name, ),
                                            obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        elif not isinstance(value, (list, set)):
                            if isinstance(value, string_types):
                                value = value.split(',')
                            else:
                                # Making a list like this handles strings of
                                # text and bytes properly
                                value = [value]
                        if not isinstance(value, set):
                            value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)
                    elif attribute.isa == 'class':
                        if not isinstance(value, attribute.class_type):
                            raise TypeError(
                                "%s is not a valid %s (got a %s instead)" %
                                (name, attribute.class_type, type(value)))
                        value.post_validate(templar=templar)

                # and assign the massaged value back to the attribute field
                setattr(self, name, value)
            except (TypeError, ValueError) as e:
                value = getattr(self, name)
                raise AnsibleParserError(
                    "the field '%s' has an invalid value (%s), and could not be converted to an %s."
                    "The error was: %s" % (name, value, attribute.isa, e),
                    obj=self.get_ds(),
                    orig_exc=e)
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    if name == 'args':
                        msg = "The task includes an option with an undefined variable. The error was: %s" % (
                            to_native(e))
                    else:
                        msg = "The field '%s' has an invalid value, which includes an undefined variable. The error was: %s" % (
                            name, to_native(e))
                    raise AnsibleParserError(msg,
                                             obj=self.get_ds(),
                                             orig_exc=e)

        self._finalized = True
Example #19
def ensure_type(value, value_type, origin=None):
    ''' return a configuration variable with casting
    :arg value: The value to ensure correct typing of
    :kwarg value_type: The type of the value.  This can be any of the following strings:
        :boolean: sets the value to a True or False value
        :integer: Sets the value to an integer or raises a ValueError
        :float: Sets the value to a float or raises a ValueError
        :list: Treats the value as a comma separated list.  Split the value
            and return it as a python list.
        :none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
        :tmp_path: Create a unique temporary directory inside of the directory
            specified by value and return its path.
        :pathlist: Treat the value as a typical PATH string.  (On POSIX, this
            means colon separated strings.)  Split the value and then expand
            each part for environment variables and tildes.
    '''

    basedir = None
    if origin and os.path.isabs(origin) and os.path.exists(origin):
        basedir = origin

    if value_type:
        value_type = value_type.lower()

    if value_type in ('boolean', 'bool'):
        value = boolean(value, strict=False)

    elif value is not None:
        if value_type in ('integer', 'int'):
            value = int(value)

        elif value_type == 'float':
            value = float(value)

        elif value_type == 'list':
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]

        elif value_type == 'none':
            if value == "None":
                value = None

        elif value_type == 'path':
            value = resolve_path(value, basedir=basedir)

        elif value_type in ('tmp', 'temppath', 'tmppath'):
            value = resolve_path(value, basedir=basedir)
            if not os.path.exists(value):
                makedirs_safe(value, 0o700)
            prefix = 'ansible-local-%s' % os.getpid()
            value = tempfile.mkdtemp(prefix=prefix, dir=value)

        elif value_type == 'pathspec':
            if isinstance(value, string_types):
                value = value.split(os.pathsep)
            value = [resolve_path(x, basedir=basedir) for x in value]

        elif value_type == 'pathlist':
            if isinstance(value, string_types):
                value = value.split(',')
            value = [resolve_path(x, basedir=basedir) for x in value]

        elif value_type in ('str', 'string'):
            value = unquote(to_text(value, errors='surrogate_or_strict'))

        # defaults to string type
        elif isinstance(value, string_types):
            value = unquote(value)

    return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
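
A few illustrative calls against ensure_type (values chosen for illustration; note the trailing to_text uses nonstring='passthru', so non-string results are returned unchanged):

    assert ensure_type('yes', 'boolean') is True
    assert ensure_type('8', 'integer') == 8
    assert ensure_type('a, b ,c', 'list') == ['a', 'b', 'c']
    assert ensure_type('None', 'none') is None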
Example #20
    def run(self, terms, variables, **kwargs):

        anydict = False
        skip = False

        for term in terms:
            if isinstance(term, dict):
                anydict = True

        total_search = []
        if anydict:
            for term in terms:
                if isinstance(term, dict):
                    files = term.get('files', [])
                    paths = term.get('paths', [])
                    skip = boolean(term.get('skip', False), strict=False)

                    filelist = files
                    if isinstance(files, string_types):
                        files = files.replace(',', ' ')
                        files = files.replace(';', ' ')
                        filelist = files.split(' ')

                    pathlist = paths
                    if paths:
                        if isinstance(paths, string_types):
                            paths = paths.replace(',', ' ')
                            paths = paths.replace(':', ' ')
                            paths = paths.replace(';', ' ')
                            pathlist = paths.split(' ')

                    if not pathlist:
                        total_search = filelist
                    else:
                        for path in pathlist:
                            for fn in filelist:
                                f = os.path.join(path, fn)
                                total_search.append(f)
                else:
                    total_search.append(term)
        else:
            total_search = self._flatten(terms)

        for fn in total_search:
            try:
                fn = self._templar.template(fn)
            except (AnsibleUndefinedVariable, UndefinedError):
                continue

            # get subdir if set by task executor, default to files otherwise
            subdir = getattr(self, '_subdir', 'files')
            path = None
            path = self.find_file_in_search_path(variables,
                                                 subdir,
                                                 fn,
                                                 ignore_missing=True)
            if path is not None:
                return [path]
        if skip:
            return []
        raise AnsibleLookupError(
            "No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
            "files are found")
Example #21
    def run(self, tmp=None, task_vars=None):
        ''' handler for template operations '''

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        force = boolean(self._task.args.get('force', True), strict=False)
        follow = boolean(self._task.args.get('follow', False), strict=False)
        state = self._task.args.get('state', None)
        newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
        variable_start_string = self._task.args.get('variable_start_string', None)
        variable_end_string = self._task.args.get('variable_end_string', None)
        block_start_string = self._task.args.get('block_start_string', None)
        block_end_string = self._task.args.get('block_end_string', None)
        trim_blocks = boolean(self._task.args.get('trim_blocks', True), strict=False)
        lstrip_blocks = boolean(self._task.args.get('lstrip_blocks', False), strict=False)

        # Option `lstrip_blocks' was added in Jinja2 version 2.7.
        if lstrip_blocks:
            try:
                import jinja2.defaults
            except ImportError:
                raise AnsibleError('Unable to import Jinja2 defaults for determining Jinja2 features.')

            try:
                jinja2.defaults.LSTRIP_BLOCKS
            except AttributeError:
                raise AnsibleError("Option `lstrip_blocks' is only available in Jinja2 versions >=2.7")

        wrong_sequences = ["\\n", "\\r", "\\r\\n"]
        allowed_sequences = ["\n", "\r", "\r\n"]

        # We need to convert unescaped sequences to proper escaped sequences for Jinja2
        if newline_sequence in wrong_sequences:
            newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]

        try:
            for s_type in ('source', 'dest', 'state', 'newline_sequence', 'variable_start_string', 'variable_end_string', 'block_start_string',
                           'block_end_string'):
                value = locals()[s_type]
                value = ensure_type(value, 'string')
                if value is not None and not isinstance(value, string_types):
                    raise AnsibleActionFail("%s is expected to be a string, but got %s instead" % (s_type, type(value)))
                locals()[s_type] = value

            for b_type in ('force', 'follow', 'trim_blocks'):
                value = locals()[b_type]
                value = ensure_type(value, 'boolean')
                if value is not None and not isinstance(value, bool):
                    raise AnsibleActionFail("%s is expected to be a boolean, but got %s instead" % (b_type, type(value)))
                locals()[b_type] = value

            if state is not None:
                raise AnsibleActionFail("'state' cannot be specified on a template")
            elif source is None or dest is None:
                raise AnsibleActionFail("src and dest are required")
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail("newline_sequence needs to be one of: \n, \r or \r\n")
            else:
                try:
                    source = self._find_needle('templates', source)
                except AnsibleError as e:
                    raise AnsibleActionFail(to_text(e))

            mode = self._task.args.get('mode', None)
            if mode == 'preserve':
                mode = '0%03o' % stat.S_IMODE(os.stat(source).st_mode)

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find src=%s, %s" % (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    template_data = to_text(f.read(), errors='surrogate_or_strict')

                # set jinja2 internal search path for includes
                searchpath = task_vars.get('ansible_search_path', [])
                searchpath.extend([self._loader._basedir, os.path.dirname(source)])

                # We want to search into the 'templates' subdir of each search path in
                # addition to our original search paths.
                newsearchpath = []
                for p in searchpath:
                    newsearchpath.append(os.path.join(p, 'templates'))
                    newsearchpath.append(p)
                searchpath = newsearchpath

                self._templar.environment.loader.searchpath = searchpath
                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                temp_vars.update(generate_ansible_template_vars(source))

                old_vars = self._templar._available_variables
                self._templar.set_available_variables(temp_vars)
                resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
                self._templar.set_available_variables(old_vars)
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" % (type(e).__name__, to_text(e)))
            finally:
                self._loader.cleanup_tmp_file(b_tmp_source)

            new_task = self._task.copy()
            # mode is either the mode from task.args or the mode of the source file if the task.args
            # mode == 'preserve'
            new_task.args['mode'] = mode
            new_task.args.pop('newline_sequence', None)
            new_task.args.pop('block_start_string', None)
            new_task.args.pop('block_end_string', None)
            new_task.args.pop('variable_start_string', None)
            new_task.args.pop('variable_end_string', None)
            new_task.args.pop('trim_blocks', None)
            new_task.args.pop('lstrip_blocks', None)

            local_tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)

            try:
                result_file = os.path.join(local_tempdir, os.path.basename(source))
                with open(to_bytes(result_file, errors='surrogate_or_strict'), 'wb') as f:
                    f.write(to_bytes(resultant, errors='surrogate_or_strict'))

                new_task.args.update(
                    dict(
                        src=result_file,
                        dest=dest,
                        follow=follow,
                    ),
                )
                copy_action = self._shared_loader_obj.action_loader.get('copy',
                                                                        task=new_task,
                                                                        connection=self._connection,
                                                                        play_context=self._play_context,
                                                                        loader=self._loader,
                                                                        templar=self._templar,
                                                                        shared_loader_obj=self._shared_loader_obj)
                result.update(copy_action.run(task_vars=task_vars))
            finally:
                shutil.rmtree(to_bytes(local_tempdir, errors='surrogate_or_strict'))

        except AnsibleAction as e:
            result.update(e.result)
        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
Example #22
def mk_boolean(value):
    ''' moved to module_utils'''
    _deprecated('ansible.constants.mk_boolean() is deprecated.  Use ansible.module_utils.parsing.convert_bool.boolean() instead')
    return boolean(value, strict=False)
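
So existing callers keep working through the deprecation window, e.g.:

    from ansible import constants as C

    C.mk_boolean('on')  # True, after emitting the deprecation warning above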
Example #23
    def run(self, tmp=None, task_vars=None):
        ''' handler for template operations '''

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        force = boolean(self._task.args.get('force', True), strict=False)
        state = self._task.args.get('state', None)
        newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
        variable_start_string = self._task.args.get('variable_start_string', None)
        variable_end_string = self._task.args.get('variable_end_string', None)
        block_start_string = self._task.args.get('block_start_string', None)
        block_end_string = self._task.args.get('block_end_string', None)
        trim_blocks = self._task.args.get('trim_blocks', None)

        wrong_sequences = ["\\n", "\\r", "\\r\\n"]
        allowed_sequences = ["\n", "\r", "\r\n"]

        # We need to convert unescaped sequences to proper escaped sequences for Jinja2
        if newline_sequence in wrong_sequences:
            newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]

        if state is not None:
            result['failed'] = True
            result['msg'] = "'state' cannot be specified on a template"
        elif source is None or dest is None:
            result['failed'] = True
            result['msg'] = "src and dest are required"
        elif newline_sequence not in allowed_sequences:
            result['failed'] = True
            result['msg'] = "newline_sequence needs to be one of: \n, \r or \r\n"
        else:
            try:
                source = self._find_needle('templates', source)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)

        if 'failed' in result:
            return result

        # Expand any user home dir specification
        dest = self._remote_expand_user(dest)

        directory_prepended = False
        if dest.endswith(os.sep):
            # Optimization.  trailing slash means we know it's a directory
            directory_prepended = True
            dest = self._connection._shell.join_path(dest, os.path.basename(source))
        else:
            # Find out if it's a directory
            dest_stat = self._execute_remote_stat(dest, task_vars, True, tmp=tmp)
            if dest_stat['exists'] and dest_stat['isdir']:
                dest = self._connection._shell.join_path(dest, os.path.basename(source))

        # Get vault decrypted tmp file
        try:
            tmp_source = self._loader.get_real_file(source)
        except AnsibleFileNotFound as e:
            result['failed'] = True
            result['msg'] = "could not find src=%s, %s" % (source, e)
            self._remove_tmp_path(tmp)
            return result

        # template the source data locally & get ready to transfer
        try:
            with open(tmp_source, 'r') as f:
                template_data = to_text(f.read())

            # set jinja2 internal search path for includes
            searchpath = task_vars.get('ansible_search_path', [])
            searchpath.extend([self._loader._basedir, os.path.dirname(source)])

            # We want to search into the 'templates' subdir of each search path in
            # addition to our original search paths.
            newsearchpath = []
            for p in searchpath:
                newsearchpath.append(os.path.join(p, 'templates'))
                newsearchpath.append(p)
            searchpath = newsearchpath

            self._templar.environment.loader.searchpath = searchpath
            self._templar.environment.newline_sequence = newline_sequence
            if block_start_string is not None:
                self._templar.environment.block_start_string = block_start_string
            if block_end_string is not None:
                self._templar.environment.block_end_string = block_end_string
            if variable_start_string is not None:
                self._templar.environment.variable_start_string = variable_start_string
            if variable_end_string is not None:
                self._templar.environment.variable_end_string = variable_end_string
            if trim_blocks is not None:
                self._templar.environment.trim_blocks = bool(trim_blocks)

            # add ansible 'template' vars
            temp_vars = task_vars.copy()
            temp_vars.update(generate_ansible_template_vars(source))

            old_vars = self._templar._available_variables
            self._templar.set_available_variables(temp_vars)
            resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
            self._templar.set_available_variables(old_vars)
        except Exception as e:
            result['failed'] = True
            result['msg'] = "%s: %s" % (type(e).__name__, to_text(e))
            return result
        finally:
            self._loader.cleanup_tmp_file(tmp_source)

        if not tmp:
            tmp = self._make_tmp_path()

        local_checksum = checksum_s(resultant)
        remote_checksum = self.get_checksum(dest, task_vars, not directory_prepended, source=source, tmp=tmp)
        if isinstance(remote_checksum, dict):
            # Error from remote_checksum is a dict.  Valid return is a str
            result.update(remote_checksum)
            return result

        diff = {}
        new_module_args = self._task.args.copy()

        # remove newline_sequence from standard arguments
        new_module_args.pop('newline_sequence', None)
        new_module_args.pop('block_start_string', None)
        new_module_args.pop('block_end_string', None)
        new_module_args.pop('variable_start_string', None)
        new_module_args.pop('variable_end_string', None)
        new_module_args.pop('trim_blocks', None)

        if (remote_checksum == '1') or (force and local_checksum != remote_checksum):

            result['changed'] = True
            # if showing diffs, we need to get the remote value
            if self._play_context.diff:
                diff = self._get_diff_data(dest, resultant, task_vars, source_file=False)

            if not self._play_context.check_mode:  # do actual work through copy
                xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant)

                # fix file permissions when the copy is done as a different user
                self._fixup_perms2((tmp, xfered))

                # run the copy module
                new_module_args.update(
                    dict(
                        src=xfered,
                        dest=dest,
                        original_basename=os.path.basename(source),
                        follow=True,
                    ),
                )
                result.update(self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))

            if result.get('changed', False) and self._play_context.diff:
                result['diff'] = diff

        else:
            # when running the file module based on the template data, we do
            # not want the source filename (the name of the template) to be used,
            # since this would mess up links, so we clear the src param and tell
            # the module to follow links.  When doing that, we have to set
            # original_basename to the template just in case the dest is
            # a directory.
            new_module_args.update(
                dict(
                    src=None,
                    original_basename=os.path.basename(source),
                    follow=True,
                ),
            )
            result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))

        self._remove_tmp_path(tmp)

        return result
Example #24
    def run(self, tmp=None, task_vars=None):
        ''' handler for fetch operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._play_context.check_mode:
            result['skipped'] = True
            result['msg'] = 'check mode not (yet) supported for this module'
            return result

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        flat = boolean(self._task.args.get('flat'), strict=False)
        fail_on_missing = boolean(self._task.args.get('fail_on_missing'),
                                  strict=False)
        validate_checksum = boolean(self._task.args.get(
            'validate_checksum', self._task.args.get('validate_md5', True)),
                                    strict=False)

        # validate source and dest are strings FIXME: use basic.py and module specs
        if not isinstance(source, string_types):
            result['msg'] = "Invalid type supplied for source option, it must be a string"

        if not isinstance(dest, string_types):
            result['msg'] = "Invalid type supplied for dest option, it must be a string"

        # validate_md5 is the deprecated way to specify validate_checksum
        if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
            result['msg'] = "validate_checksum and validate_md5 cannot both be specified"

        if 'validate_md5' in self._task.args:
            display.deprecated('Use validate_checksum instead of validate_md5',
                               version='2.8')

        if source is None or dest is None:
            result['msg'] = "src and dest are required"

        if result.get('msg'):
            result['failed'] = True
            return result

        source = self._connection._shell.join_path(source)
        source = self._remote_expand_user(source)

        remote_checksum = None
        if not self._play_context.become:
            # calculate checksum for the remote file, don't bother if using become as slurp will be used
            # Force remote_checksum to follow symlinks because fetch always follows symlinks
            remote_checksum = self._remote_checksum(source,
                                                    all_vars=task_vars,
                                                    follow=True)

        # use slurp if permissions are lacking or privilege escalation is needed
        remote_data = None
        if remote_checksum in ('1', '2', None):
            slurpres = self._execute_module(module_name='slurp',
                                            module_args=dict(src=source),
                                            task_vars=task_vars,
                                            tmp=tmp)
            if slurpres.get('failed'):
                if not fail_on_missing and (
                        slurpres.get('msg').startswith('file not found')
                        or remote_checksum == '1'):
                    result['msg'] = "the remote file does not exist, not transferring, ignored"
                    result['file'] = source
                    result['changed'] = False
                else:
                    result.update(slurpres)
                return result
            else:
                if slurpres['encoding'] == 'base64':
                    remote_data = base64.b64decode(slurpres['content'])
                if remote_data is not None:
                    remote_checksum = checksum_s(remote_data)
                # the source path may have been expanded on the
                # target system, so we compare it here and use the
                # expanded version if it's different
                remote_source = slurpres.get('source')
                if remote_source and remote_source != source:
                    source = remote_source

        # calculate the destination name
        if os.path.sep not in self._connection._shell.join_path('a', ''):
            source = self._connection._shell._unquote(source)
            source_local = source.replace('\\', '/')
        else:
            source_local = source

        dest = os.path.expanduser(dest)
        if flat:
            if dest.endswith(os.sep):
                # if the path ends with "/", we'll use the source filename as the
                # destination filename
                base = os.path.basename(source_local)
                dest = os.path.join(dest, base)
            if not dest.startswith("/"):
                # if dest does not start with "/", we'll assume a relative path
                dest = self._loader.path_dwim(dest)
        else:
            # files are saved in dest dir, with a subdir for each host, then the filename
            if 'inventory_hostname' in task_vars:
                target_name = task_vars['inventory_hostname']
            else:
                target_name = self._play_context.remote_addr
            dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name,
                                 source_local)

        dest = dest.replace("//", "/")

        if remote_checksum in ('0', '1', '2', '3', '4', '5'):
            result['changed'] = False
            result['file'] = source
            if remote_checksum == '0':
                result['msg'] = "unable to calculate the checksum of the remote file"
            elif remote_checksum == '1':
                result['msg'] = "the remote file does not exist"
            elif remote_checksum == '2':
                result['msg'] = "no read permission on remote file"
            elif remote_checksum == '3':
                result['msg'] = "remote file is a directory, fetch cannot work on directories"
            elif remote_checksum == '4':
                result['msg'] = "python isn't present on the system.  Unable to compute checksum"
            elif remote_checksum == '5':
                result['msg'] = "stdlib json or simplejson was not found on the remote machine. Only the raw module can work without those installed"
            # Historically, these don't fail because you may want to transfer
            # a log file that possibly MAY exist but keep going to fetch other
            # log files. Today, this is better achieved by adding
            # ignore_errors or failed_when to the task.  Control the behaviour
            # via fail_when_missing
            if fail_on_missing:
                result['failed'] = True
                del result['changed']
            else:
                result['msg'] += ", not transferring, ignored"
            return result

        # calculate checksum for the local file
        local_checksum = checksum(dest)

        if remote_checksum != local_checksum:
            # create the containing directories, if needed
            makedirs_safe(os.path.dirname(dest))

            # fetch the file and check for changes
            if remote_data is None:
                self._connection.fetch_file(source, dest)
            else:
                try:
                    with open(to_bytes(dest, errors='surrogate_or_strict'),
                              'wb') as f:
                        f.write(remote_data)
                except (IOError, OSError) as e:
                    raise AnsibleError("Failed to fetch the file: %s" % e)
            new_checksum = secure_hash(dest)
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                new_md5 = md5(dest)
            except ValueError:
                new_md5 = None

            if validate_checksum and new_checksum != remote_checksum:
                result.update(
                    dict(failed=True,
                         md5sum=new_md5,
                         msg="checksum mismatch",
                         file=source,
                         dest=dest,
                         remote_md5sum=None,
                         checksum=new_checksum,
                         remote_checksum=remote_checksum))
            else:
                result.update({
                    'changed': True,
                    'md5sum': new_md5,
                    'dest': dest,
                    'remote_md5sum': None,
                    'checksum': new_checksum,
                    'remote_checksum': remote_checksum
                })
        else:
            # For backwards compatibility. We'll return None on FIPS enabled systems
            try:
                local_md5 = md5(dest)
            except ValueError:
                local_md5 = None
            result.update(
                dict(changed=False,
                     md5sum=local_md5,
                     file=source,
                     dest=dest,
                     checksum=local_checksum))

        return result
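
The destination-path rules above (flat vs. per-host trees) are easy to misread; the following standalone sketch, with hypothetical names and none of the plugin plumbing, reproduces just that logic:

import os

def build_fetch_dest(dest, source_local, hostname, flat=False):
    # Hypothetical re-implementation of the dest calculation above (minus
    # path_dwim): with flat=True a trailing separator keeps the source
    # filename; otherwise files land under dest/<hostname>/<source path>.
    dest = os.path.expanduser(dest)
    if flat:
        if dest.endswith(os.sep):
            dest = os.path.join(dest, os.path.basename(source_local))
    else:
        dest = "%s/%s/%s" % (dest, hostname, source_local)
    return dest.replace("//", "/")

# build_fetch_dest('backups', '/var/log/syslog', 'web01')
# -> 'backups/web01/var/log/syslog'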
Example No. 25
    def _get_connection(self, cvars, templar):
        '''
        Reads the connection property for the host, and returns the
        correct connection object from the list of connection plugins
        '''

        # use magic var if it exists; if not, let task inheritance do its thing.
        if cvars.get('ansible_connection') is not None:
            self._play_context.connection = templar.template(cvars['ansible_connection'])
        else:
            self._play_context.connection = self._task.connection

        # TODO: play context has logic to update the connection for 'smart'
        # (default value, will choose between ssh and paramiko) and 'persistent'
        # (really paramiko); eventually this should move to the task object itself.
        connection_name = self._play_context.connection

        # load connection
        conn_type = connection_name
        connection, plugin_load_context = self._shared_loader_obj.connection_loader.get_with_context(
            conn_type,
            self._play_context,
            self._new_stdin,
            task_uuid=self._task._uuid,
            ansible_playbook_pid=to_text(os.getppid())
        )

        if not connection:
            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

        # load become plugin if needed
        if cvars.get('ansible_become') is not None:
            become = boolean(templar.template(cvars['ansible_become']))
        else:
            become = self._task.become

        if become:
            if cvars.get('ansible_become_method'):
                become_plugin = self._get_become(templar.template(cvars['ansible_become_method']))
            else:
                become_plugin = self._get_become(self._task.become_method)

            try:
                connection.set_become_plugin(become_plugin)
            except AttributeError:
                # Older connection plugin that does not support set_become_plugin
                pass

            if getattr(connection.become, 'require_tty', False) and not getattr(connection, 'has_tty', False):
                raise AnsibleError(
                    "The '%s' connection does not provide a TTY which is required for the selected "
                    "become plugin: %s." % (conn_type, become_plugin.name)
                )

            # Backwards compat for connection plugins that don't support become plugins
            # Just do this unconditionally for now, we could move it inside of the
            # AttributeError above later
            self._play_context.set_become_plugin(become_plugin.name)

        # Also backwards compat call for those still using play_context
        self._play_context.set_attributes_from_plugin(connection)

        if any(((connection.supports_persistence and C.USE_PERSISTENT_CONNECTIONS), connection.force_persistence)):
            self._play_context.timeout = connection.get_option('persistent_command_timeout')
            display.vvvv('attempting to start connection', host=self._play_context.remote_addr)
            display.vvvv('using connection plugin %s' % connection.transport, host=self._play_context.remote_addr)

            options = self._get_persistent_connection_options(connection, cvars, templar)
            socket_path = start_connection(self._play_context, options, self._task._uuid)
            display.vvvv('local domain socket path is %s' % socket_path, host=self._play_context.remote_addr)
            setattr(connection, '_socket_path', socket_path)

        return connection
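
A quick illustration of the boolean() coercion applied to the ansible_become magic var above; a minimal sketch assuming a standard Ansible installation:

from ansible.module_utils.parsing.convert_bool import boolean

# Magic vars arrive templated as strings; boolean() normalizes the usual
# spellings and, with the default strict=True, raises TypeError otherwise.
for raw in ('yes', 'False', '1', True):
    print(repr(raw), '->', boolean(raw))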
Example No. 26
    def run(self, tmp=None, task_vars=None):
        ''' generates params and passes them on to the rsync module '''
        # When modifying this function be aware of the tricky convolutions
        # your thoughts have to go through:
        #
        # In normal ansible, we connect from controller to inventory_hostname
        # (playbook's hosts: field) or controller to delegate_to host and run
        # a module on one of those hosts.
        #
        # So things that are directly related to the core of ansible are in
        # terms of that sort of connection, which always originates on the
        # controller.
        #
        # In synchronize we use ansible to connect to either the controller or
        # to the delegate_to host and then run rsync which makes its own
        # connection from controller to inventory_hostname or delegate_to to
        # inventory_hostname.
        #
        # That means synchronize needs some knowledge of the controller-to-
        # inventory_host (or delegate host) connections that ansible typically
        # establishes, and must use those to construct a command line for rsync
        # to connect from the inventory_host back to the controller/delegate.
        # The challenge for coders is remembering which leg of the trip is
        # associated with the conditions being checked at any one time.
        if task_vars is None:
            task_vars = dict()

        # We make a copy of the args here because we may fail and be asked to
        # retry. If that happens we don't want to pass the munged args through
        # to our next invocation. Munged args are single use only.
        _tmp_args = self._task.args.copy()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Store remote connection type
        self._remote_transport = self._connection.transport

        # Handle docker connection options
        if self._remote_transport in ['docker', 'community.general.docker']:
            self._docker_cmd = self._connection.docker_cmd
            if self._play_context.docker_extra_args:
                self._docker_cmd = "%s %s" % (self._docker_cmd, self._play_context.docker_extra_args)

        # self._connection accounts for delegate_to so
        # remote_transport is the transport ansible thought it would need
        # between the controller and the delegate_to host or the controller
        # and the remote_host if delegate_to isn't set.

        remote_transport = False
        if self._connection.transport != 'local':
            remote_transport = True

        try:
            delegate_to = self._task.delegate_to
        except (AttributeError, KeyError):
            delegate_to = None

        # ssh, paramiko, docker, buildah and local are fully supported transports.
        # Anything else only works with delegate_to
        if delegate_to is None and self._connection.transport not in \
                ('ssh', 'paramiko', 'local', 'docker', 'community.general.docker', 'buildah', 'containers.podman.buildah'):
            result['failed'] = True
            result['msg'] = (
                "synchronize uses rsync to function. rsync needs to connect to the remote "
                "host via ssh, docker client or a direct filesystem "
                "copy. This remote host is being accessed via %s instead "
                "so it cannot work." % self._connection.transport)
            return result

        use_ssh_args = _tmp_args.pop('use_ssh_args', None)

        # Parameter name needed by the ansible module
        _tmp_args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync'
        _tmp_args['_local_rsync_password'] = task_vars.get('ansible_ssh_pass') or task_vars.get('ansible_password')

        # rsync thinks that one end of the connection is localhost and the
        # other is the host we're running the task for  (Note: We use
        # ansible's delegate_to mechanism to determine which host rsync is
        # running on so localhost could be a non-controller machine if
        # delegate_to is used)
        src_host = '127.0.0.1'
        inventory_hostname = task_vars.get('inventory_hostname')
        dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname)
        try:
            dest_host = dest_host_inventory_vars['ansible_host']
        except KeyError:
            dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname)

        dest_host_ids = [hostid for hostid in (dest_host_inventory_vars.get('inventory_hostname'),
                                               dest_host_inventory_vars.get('ansible_host'),
                                               dest_host_inventory_vars.get('ansible_ssh_host'))
                         if hostid is not None]

        localhost_ports = set()
        for host in C.LOCALHOST:
            localhost_vars = task_vars['hostvars'].get(host, {})
            for port_var in C.MAGIC_VARIABLE_MAPPING['port']:
                port = localhost_vars.get(port_var, None)
                if port:
                    break
            else:
                port = C.DEFAULT_REMOTE_PORT
            localhost_ports.add(port)

        # dest_is_local tells us if the host rsync runs on is the same as the
        # host rsync puts the files on.  This is about *rsync's connection*,
        # not about the ansible connection to run the module.
        dest_is_local = False
        if delegate_to is None and remote_transport is False:
            dest_is_local = True
        elif delegate_to is not None and delegate_to in dest_host_ids:
            dest_is_local = True

        # CHECK FOR NON-DEFAULT SSH PORT
        inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT
        if _tmp_args.get('dest_port', None) is None:
            if inv_port is not None:
                _tmp_args['dest_port'] = inv_port

        # Set use_delegate if we are going to run rsync on a delegated host
        # instead of localhost
        use_delegate = False
        if delegate_to is not None and delegate_to in dest_host_ids:
            # edge case: explicit delegate and dest_host are the same
            # so we run rsync on the remote machine targeting its localhost
            # (itself)
            dest_host = '127.0.0.1'
            use_delegate = True
        elif delegate_to is not None and remote_transport:
            # If we're delegating to a remote host then we need to use the
            # delegate_to settings
            use_delegate = True

        # Delegate to localhost as the source of the rsync unless we've been
        # told (via delegate_to) that a different host is the source of the
        # rsync
        if not use_delegate and remote_transport:
            # Create a connection to localhost to run rsync on
            new_stdin = self._connection._new_stdin

            # Unlike port, there can be only one shell
            localhost_shell = None
            for host in C.LOCALHOST:
                localhost_vars = task_vars['hostvars'].get(host, {})
                for shell_var in C.MAGIC_VARIABLE_MAPPING['shell']:
                    localhost_shell = localhost_vars.get(shell_var, None)
                    if localhost_shell:
                        break
                if localhost_shell:
                    break
            else:
                localhost_shell = os.path.basename(C.DEFAULT_EXECUTABLE)
            self._play_context.shell = localhost_shell

            # Unlike port, there can be only one executable
            localhost_executable = None
            for host in C.LOCALHOST:
                localhost_vars = task_vars['hostvars'].get(host, {})
                for executable_var in C.MAGIC_VARIABLE_MAPPING['executable']:
                    localhost_executable = localhost_vars.get(executable_var, None)
                    if localhost_executable:
                        break
                if localhost_executable:
                    break
            else:
                localhost_executable = C.DEFAULT_EXECUTABLE
            self._play_context.executable = localhost_executable

            new_connection = connection_loader.get('local', self._play_context, new_stdin)
            self._connection = new_connection
            # Override _remote_is_local as an instance attribute specifically for the synchronize use case
            # ensuring we set local tmpdir correctly
            self._connection._remote_is_local = True
            self._override_module_replaced_vars(task_vars)

        # SWITCH SRC AND DEST HOST PER MODE
        if _tmp_args.get('mode', 'push') == 'pull':
            (dest_host, src_host) = (src_host, dest_host)

        # MUNGE SRC AND DEST PER REMOTE_HOST INFO
        src = _tmp_args.get('src', None)
        dest = _tmp_args.get('dest', None)
        if src is None or dest is None:
            return dict(failed=True, msg="synchronize requires both the src and dest parameters to be set")

        # Determine if we need a user@
        user = None
        if not dest_is_local:
            # Src and dest rsync "path" handling
            if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False):
                if use_delegate:
                    user = task_vars.get('ansible_delegated_vars', dict()).get('ansible_ssh_user', None)
                    if not user:
                        user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
                    if not user:
                        user = C.DEFAULT_REMOTE_USER

                else:
                    user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

            # Private key handling
            # Use the private_key parameter if passed else use context private_key_file
            _tmp_args['private_key'] = _tmp_args.get('private_key', self._play_context.private_key_file)

            # use the mode to define src and dest's url
            if _tmp_args.get('mode', 'push') == 'pull':
                # src is a remote path: <user>@<host>, dest is a local path
                src = self._process_remote(_tmp_args, src_host, src, user, inv_port in localhost_ports)
                dest = self._process_origin(dest_host, dest, user)
            else:
                # src is a local path, dest is a remote path: <user>@<host>
                src = self._process_origin(src_host, src, user)
                dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports)
        else:
            # Still need to munge paths (to account for roles) even if we aren't
            # copying files between hosts
            if not src.startswith('/'):
                src = self._get_absolute_path(path=src)
            if not dest.startswith('/'):
                dest = self._get_absolute_path(path=dest)

        _tmp_args['src'] = src
        _tmp_args['dest'] = dest

        # Allow custom rsync path argument
        rsync_path = _tmp_args.get('rsync_path', None)

        # backup original become as we are probably about to unset it
        become = self._play_context.become

        if not dest_is_local:
            # don't escalate for docker. doing --rsync-path with docker exec fails
            # and we can switch directly to the user via docker arguments
            if self._play_context.become and not rsync_path and self._remote_transport not in ['docker', 'community.general.docker']:
                # If no rsync_path is set, become was originally set, and dest is
                # remote then add privilege escalation here.
                if self._play_context.become_method == 'sudo':
                    rsync_path = 'sudo rsync'
                # TODO: have to add in the rest of the become methods here

            # We cannot use privilege escalation on the machine running the
            # module.  Instead we run it on the machine rsync is connecting
            # to.
            self._play_context.become = False

        _tmp_args['rsync_path'] = rsync_path

        if use_ssh_args:
            ssh_args = [
                getattr(self._play_context, 'ssh_args', ''),
                getattr(self._play_context, 'ssh_common_args', ''),
                getattr(self._play_context, 'ssh_extra_args', ''),
            ]
            _tmp_args['ssh_args'] = ' '.join([a for a in ssh_args if a])

        # If launching synchronize against docker container
        # use rsync_opts to support container to override rsh options
        if self._remote_transport in ['docker', 'community.general.docker', 'buildah', 'containers.podman.buildah'] and not use_delegate:
            # Replicate what we do in the module argumentspec handling for lists
            if not isinstance(_tmp_args.get('rsync_opts'), MutableSequence):
                tmp_rsync_opts = _tmp_args.get('rsync_opts', [])
                if isinstance(tmp_rsync_opts, string_types):
                    tmp_rsync_opts = tmp_rsync_opts.split(',')
                elif isinstance(tmp_rsync_opts, (int, float)):
                    tmp_rsync_opts = [to_text(tmp_rsync_opts)]
                _tmp_args['rsync_opts'] = tmp_rsync_opts

            if '--blocking-io' not in _tmp_args['rsync_opts']:
                _tmp_args['rsync_opts'].append('--blocking-io')

            if self._remote_transport in ['docker', 'community.general.docker']:
                if become and self._play_context.become_user:
                    _tmp_args['rsync_opts'].append("--rsh=%s exec -u %s -i" % (self._docker_cmd, self._play_context.become_user))
                elif user is not None:
                    _tmp_args['rsync_opts'].append("--rsh=%s exec -u %s -i" % (self._docker_cmd, user))
                else:
                    _tmp_args['rsync_opts'].append("--rsh=%s exec -i" % self._docker_cmd)
            elif self._remote_transport in ['buildah', 'containers.podman.buildah']:
                _tmp_args['rsync_opts'].append("--rsh=buildah run --")

        # run the module and store the result
        result.update(self._execute_module('ansible.posix.synchronize', module_args=_tmp_args, task_vars=task_vars))

        return result
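
Note the strict=False coercion of set_remote_user above: unrecognized values quietly become False instead of raising. A minimal sketch, with a hypothetical args dict:

from ansible.module_utils.parsing.convert_bool import boolean

_tmp_args = {'set_remote_user': 'on'}  # hypothetical task args
if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False):
    print('rsync paths will be prefixed with user@host')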
Example No. 27
    def _file_transport_command(self, in_path, out_path, sftp_action):
        # scp and sftp require square brackets for IPv6 addresses, but
        # accept them for hostnames and IPv4 addresses too.
        host = '[%s]' % self.host

        # Transfer methods to try
        methods = []

        # Use the transfer_method option if set, otherwise use scp_if_ssh
        ssh_transfer_method = self._play_context.ssh_transfer_method
        if ssh_transfer_method is not None:
            if ssh_transfer_method not in ('smart', 'sftp', 'scp', 'piped'):
                raise AnsibleOptionsError('transfer_method needs to be one of [smart|sftp|scp|piped]')
            if ssh_transfer_method == 'smart':
                methods = ['sftp', 'scp', 'piped']
            else:
                methods = [ssh_transfer_method]
        else:
            # since this can be a non-bool now, we need to handle it correctly
            scp_if_ssh = C.DEFAULT_SCP_IF_SSH
            if not isinstance(scp_if_ssh, bool):
                scp_if_ssh = scp_if_ssh.lower()
                if scp_if_ssh in BOOLEANS:
                    scp_if_ssh = boolean(scp_if_ssh, strict=False)
                elif scp_if_ssh != 'smart':
                    raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
            if scp_if_ssh == 'smart':
                methods = ['sftp', 'scp', 'piped']
            elif scp_if_ssh is True:
                methods = ['scp']
            else:
                methods = ['sftp']

        for method in methods:
            returncode = stdout = stderr = None
            if method == 'sftp':
                cmd = self._build_command('sftp', to_bytes(host))
                in_data = u"{0} {1} {2}\n".format(sftp_action, shlex_quote(in_path), shlex_quote(out_path))
                in_data = to_bytes(in_data, nonstring='passthru')
                (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
            elif method == 'scp':
                if sftp_action == 'get':
                    cmd = self._build_command('scp', u'{0}:{1}'.format(host, shlex_quote(in_path)), out_path)
                else:
                    cmd = self._build_command('scp', in_path, u'{0}:{1}'.format(host, shlex_quote(out_path)))
                in_data = None
                (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
            elif method == 'piped':
                if sftp_action == 'get':
                    # we pass sudoable=False to disable pty allocation, which
                    # would end up mixing stdout/stderr and screwing with newlines
                    (returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
                    with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
                        out_file.write(stdout)
                else:
                    with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
                        in_data = in_file.read()
                    in_data = to_bytes(in_data, nonstring='passthru')
                    (returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), in_data=in_data)

            # Check the return code and roll over to the next method if it failed
            if returncode == 0:
                return (returncode, stdout, stderr)
            else:
                # If not in smart mode, the data will be printed by the raise below
                if len(methods) > 1:
                    display.warning(msg='%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
                    display.debug(msg='%s' % to_native(stdout))
                    display.debug(msg='%s' % to_native(stderr))

        if returncode == 255:
            raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
        else:
            raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
                               (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
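
The tri-state scp_if_ssh handling above is worth isolating. This hypothetical helper mirrors it for string and bool inputs, simplified to coerce with strict=False rather than raise on unrecognized values:

from ansible.module_utils.parsing.convert_bool import boolean

def transfer_methods(scp_if_ssh):
    # 'smart' tries sftp, then scp, then piped; a boolean pins one method.
    if not isinstance(scp_if_ssh, bool) and scp_if_ssh.lower() == 'smart':
        return ['sftp', 'scp', 'piped']
    return ['scp'] if boolean(scp_if_ssh, strict=False) else ['sftp']

print(transfer_methods('smart'))  # ['sftp', 'scp', 'piped']
print(transfer_methods('yes'))    # ['scp']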
Example No. 28
import os

from ansible.module_utils.parsing.convert_bool import boolean
import six

from molecule import api
from molecule import interpolation
from molecule import logger
from molecule import platforms
from molecule import scenario
from molecule import state
from molecule import util
from molecule.dependency import ansible_galaxy
from molecule.dependency import gilt
from molecule.dependency import shell
from molecule.model import schema_v3
from molecule.provisioner import ansible

LOG = logger.get_logger(__name__)
MOLECULE_DEBUG = boolean(os.environ.get('MOLECULE_DEBUG', 'False'))
MOLECULE_DIRECTORY = 'molecule'
MOLECULE_FILE = 'molecule.yml'
MOLECULE_KEEP_STRING = 'MOLECULE_'


# https://stackoverflow.com/questions/16017397/injecting-function-call-after-init-with-decorator  # noqa
class NewInitCaller(type):
    """NewInitCaller."""
    def __call__(cls, *args, **kwargs):
        obj = type.__call__(cls, *args, **kwargs)
        obj.after_init()
        return obj


@six.add_metaclass(NewInitCaller)
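
The NewInitCaller metaclass guarantees a post-construction hook without the caller having to remember it. A minimal Python 3 sketch of the same pattern (the Config class here is hypothetical):

class NewInitCaller(type):
    def __call__(cls, *args, **kwargs):
        obj = type.__call__(cls, *args, **kwargs)
        obj.after_init()  # runs once __init__ has returned
        return obj

class Config(metaclass=NewInitCaller):
    def __init__(self, debug=False):
        self.debug = debug

    def after_init(self):
        print('validated, debug=%s' % self.debug)

Config(debug=True)  # prints: validated, debug=True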
Example No. 29
    def run(self, tmp=None, task_vars=None):
        ''' handler for template operations '''

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # Options type validation
        # strings
        for s_type in ('src', 'dest', 'state', 'newline_sequence',
                       'variable_start_string', 'variable_end_string',
                       'block_start_string', 'block_end_string'):
            if s_type in self._task.args:
                value = ensure_type(self._task.args[s_type], 'string')
                if value is not None and not isinstance(value, string_types):
                    raise AnsibleActionFail(
                        "%s is expected to be a string, but got %s instead" %
                        (s_type, type(value)))
                self._task.args[s_type] = value

        # booleans
        try:
            follow = boolean(self._task.args.get('follow', False),
                             strict=False)
            trim_blocks = boolean(self._task.args.get('trim_blocks', True),
                                  strict=False)
            lstrip_blocks = boolean(self._task.args.get('lstrip_blocks', False),
                                    strict=False)
        except TypeError as e:
            raise AnsibleActionFail(to_native(e))

        # assign to local vars for ease of use
        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        state = self._task.args.get('state', None)
        newline_sequence = self._task.args.get('newline_sequence',
                                               self.DEFAULT_NEWLINE_SEQUENCE)
        variable_start_string = self._task.args.get('variable_start_string',
                                                    None)
        variable_end_string = self._task.args.get('variable_end_string', None)
        block_start_string = self._task.args.get('block_start_string', None)
        block_end_string = self._task.args.get('block_end_string', None)
        output_encoding = self._task.args.get('output_encoding',
                                              'utf-8') or 'utf-8'

        # Option `lstrip_blocks' was added in Jinja2 version 2.7.
        if lstrip_blocks:
            try:
                import jinja2.defaults
            except ImportError:
                raise AnsibleError(
                    'Unable to import Jinja2 defaults for determining Jinja2 features.'
                )

            try:
                jinja2.defaults.LSTRIP_BLOCKS
            except AttributeError:
                raise AnsibleError(
                    "Option `lstrip_blocks' is only available in Jinja2 versions >=2.7"
                )

        wrong_sequences = ["\\n", "\\r", "\\r\\n"]
        allowed_sequences = ["\n", "\r", "\r\n"]

        # We need to convert unescaped sequences to proper escaped sequences for Jinja2
        if newline_sequence in wrong_sequences:
            newline_sequence = allowed_sequences[wrong_sequences.index(
                newline_sequence)]

        try:
            # logical validation
            if state is not None:
                raise AnsibleActionFail(
                    "'state' cannot be specified on a template")
            elif source is None or dest is None:
                raise AnsibleActionFail("src and dest are required")
            elif newline_sequence not in allowed_sequences:
                raise AnsibleActionFail(
                    "newline_sequence needs to be one of: \n, \r or \r\n")
            else:
                try:
                    source = self._find_needle('templates', source)
                except AnsibleError as e:
                    raise AnsibleActionFail(to_text(e))

            mode = self._task.args.get('mode', None)
            if mode == 'preserve':
                mode = '0%03o' % stat.S_IMODE(os.stat(source).st_mode)

            # Get vault decrypted tmp file
            try:
                tmp_source = self._loader.get_real_file(source)
            except AnsibleFileNotFound as e:
                raise AnsibleActionFail("could not find src=%s, %s" %
                                        (source, to_text(e)))
            b_tmp_source = to_bytes(tmp_source, errors='surrogate_or_strict')

            # template the source data locally & get ready to transfer
            try:
                with open(b_tmp_source, 'rb') as f:
                    try:
                        template_data = to_text(f.read(),
                                                errors='surrogate_or_strict')
                    except UnicodeError:
                        raise AnsibleActionFail(
                            "Template source files must be utf-8 encoded")

                # set jinja2 internal search path for includes
                searchpath = task_vars.get('ansible_search_path', [])
                searchpath.extend(
                    [self._loader._basedir,
                     os.path.dirname(source)])

                # We want to search into the 'templates' subdir of each search path in
                # addition to our original search paths.
                newsearchpath = []
                for p in searchpath:
                    newsearchpath.append(os.path.join(p, 'templates'))
                    newsearchpath.append(p)
                searchpath = newsearchpath

                self._templar.environment.loader.searchpath = searchpath
                self._templar.environment.newline_sequence = newline_sequence
                if block_start_string is not None:
                    self._templar.environment.block_start_string = block_start_string
                if block_end_string is not None:
                    self._templar.environment.block_end_string = block_end_string
                if variable_start_string is not None:
                    self._templar.environment.variable_start_string = variable_start_string
                if variable_end_string is not None:
                    self._templar.environment.variable_end_string = variable_end_string
                self._templar.environment.trim_blocks = trim_blocks
                self._templar.environment.lstrip_blocks = lstrip_blocks

                # add ansible 'template' vars
                temp_vars = task_vars.copy()
                temp_vars.update(generate_ansible_template_vars(source, dest))

                old_vars = self._templar._available_variables
                self._templar.set_available_variables(temp_vars)
                resultant = self._templar.do_template(
                    template_data,
                    preserve_trailing_newlines=True,
                    escape_backslashes=False)
                self._templar.set_available_variables(old_vars)
            except AnsibleAction:
                raise
            except Exception as e:
                raise AnsibleActionFail("%s: %s" %
                                        (type(e).__name__, to_text(e)))
            finally:
                self._loader.cleanup_tmp_file(b_tmp_source)

            new_task = self._task.copy()
            # mode is either the mode from task.args or the mode of the source file if the task.args
            # mode == 'preserve'
            new_task.args['mode'] = mode

            # remove 'template only' options:
            for remove in ('newline_sequence', 'block_start_string',
                           'block_end_string', 'variable_start_string',
                           'variable_end_string', 'trim_blocks',
                           'lstrip_blocks', 'output_encoding'):
                new_task.args.pop(remove, None)

            local_tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)

            try:
                result_file = os.path.join(local_tempdir,
                                           os.path.basename(source))
                with open(to_bytes(result_file, errors='surrogate_or_strict'),
                          'wb') as f:
                    f.write(
                        to_bytes(resultant,
                                 encoding=output_encoding,
                                 errors='surrogate_or_strict'))

                new_task.args.update(
                    dict(
                        src=result_file,
                        dest=dest,
                        follow=follow,
                    ), )
                copy_action = self._shared_loader_obj.action_loader.get(
                    'copy',
                    task=new_task,
                    connection=self._connection,
                    play_context=self._play_context,
                    loader=self._loader,
                    templar=self._templar,
                    shared_loader_obj=self._shared_loader_obj)
                result.update(copy_action.run(task_vars=task_vars))
            finally:
                shutil.rmtree(
                    to_bytes(local_tempdir, errors='surrogate_or_strict'))

        except AnsibleAction as e:
            result.update(e.result)
        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
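
The action plugin above forwards trim_blocks, lstrip_blocks and the delimiter overrides straight into its Jinja2 environment; a standalone sketch of those knobs using plain Jinja2, with no Ansible involved:

import jinja2

env = jinja2.Environment(
    trim_blocks=True,      # drop the newline after a block tag
    lstrip_blocks=False,   # keep leading whitespace before block tags
    variable_start_string='{{',
    variable_end_string='}}',
)
print(env.from_string('{% if name %}\nHello {{ name }}{% endif %}').render(name='world'))
# -> 'Hello world' (trim_blocks removed the newline after the if tag)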
Example No. 30
    def run(self, tmp=None, task_vars=None):
        ''' handler for fetch operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        try:
            result = dict(msg="",
                          stderr="",
                          stdout="",
                          file="",
                          md5sum="",
                          dest="",
                          remote_md5sum="",
                          remote_checksum="",
                          checksum="",
                          delta="",
                          failed=False)
            savf_name = ''
            created = False
            is_savf = False
            savf = ''
            ifs_created = False
            backup = False
            is_lib = False
            force_save = False
            flat = False
            if self._play_context.check_mode:
                result['skipped'] = True
                result['msg'] = 'check mode not (yet) supported for this module'
                return result

            object_names = self._task.args.get('object_names', '*ALL')
            lib_name = self._task.args.get('lib_name', None)
            object_types = self._task.args.get('object_types', '*ALL')
            is_lib = boolean(self._task.args.get('is_lib', False), strict=True)
            savefile_name = self._task.args.get('savefile_name', None)
            force_save = boolean(self._task.args.get('force_save', False), strict=True)
            backup = boolean(self._task.args.get('backup', False), strict=True)
            format = self._task.args.get('format', '*SAVF')
            target_release = self._task.args.get('target_release', '*CURRENT')
            dest = self._task.args.get('dest', None)
            flat = boolean(self._task.args.get('flat', False), strict=True)
            validate_checksum = boolean(self._task.args.get('validate_checksum', True),
                                        strict=True)

            # validate dest is a string FIXME: use basic.py and module specs
            if not isinstance(dest, string_types):
                result['msg'] = "Invalid type supplied for dest option, it must be a string. "

            if lib_name is None or dest is None:
                result['msg'] = "lib_name and dest are required. "

            object_names = object_names.upper()
            object_types = object_types.upper()
            format = format.upper()
            target_release = target_release.upper()

            if lib_name is not None:
                lib_name = lib_name.upper()

            if savefile_name is not None:
                savefile_name = savefile_name.upper()

            if lib_name == 'QSYS' and (is_lib is True or
                                       (object_names == '*ALL'
                                        and object_types == '*ALL')):
                result['msg'] = "QSYS can't be saved."

            if format != "*SAVF":
                result['msg'] = "format can only be *SAVF."

            if result.get('msg'):
                result['failed'] = True
                return result

            startd = datetime.datetime.now()

            if len(object_names.split()) == 1 and is_lib is not True:
                if object_types == '*ALL' or object_types == '*FILE':
                    if (object_names.split())[0][-1] == '*':
                        module_args = {
                            'object_name': object_names[0:-1] + '+',
                            'lib_name': lib_name,
                            'use_regex': True
                        }
                        module_output = self._execute_module(
                            module_name='ibmi_object_find',
                            module_args=module_args)
                        save_result = module_output
                        if not save_result.get('failed'):
                            if len(save_result['object_list']) == 1 and save_result['object_list'][0]['OBJTYPE'] == '*FILE' \
                               and save_result['object_list'][0]['OBJATTRIBUTE'] == 'SAVF':
                                result['msg'] += "Object is a save file, fetch it directly."
                                savf_path = self._calculate_savf_path(
                                    save_result['object_list'][0]['OBJNAME'],
                                    lib_name)
                                savf_name = save_result['object_list'][0]['OBJNAME']
                                is_savf = True
                    else:
                        module_args = {
                            'object_name': object_names,
                            'lib_name': lib_name
                        }
                        module_output = self._execute_module(
                            module_name='ibmi_object_find',
                            module_args=module_args)
                        save_result = module_output
                        if not save_result.get('failed'):
                            if len(save_result['object_list']) == 1 and save_result['object_list'][0]['OBJTYPE'] == '*FILE' and \
                               save_result['object_list'][0]['OBJATTRIBUTE'] == 'SAVF':
                                result['msg'] += "Object is a save file, fetch it directly."
                                savf_path = self._calculate_savf_path(
                                    object_names, lib_name)
                                savf_name = object_names
                                is_savf = True

                    if save_result.get('failed'):
                        result.update(save_result)
                        return result
            if is_savf is False:
                savf_name, savf_path = self._calculate_savf_name(
                    object_names, lib_name, is_lib, savefile_name, task_vars,
                    result)
                if is_lib is True:
                    omitfile = 'OMITOBJ((%s/%s *FILE))' % (lib_name, savf_name)
                    module_args = {
                        'lib_name': lib_name,
                        'savefile_name': savf_name,
                        'savefile_lib': lib_name,
                        'target_release': target_release,
                        'force_save': force_save,
                        'joblog': True,
                        'parameters': omitfile
                    }
                    module_output = self._execute_module(
                        module_name='ibmi_lib_save', module_args=module_args)
                else:
                    omitfile = 'OMITOBJ((%s/%s *FILE))' % (lib_name, savf_name)
                    module_args = {
                        'object_names': object_names,
                        'object_lib': lib_name,
                        'object_types': object_types,
                        'savefile_name': savf_name,
                        'savefile_lib': lib_name,
                        'target_release': target_release,
                        'force_save': force_save,
                        'joblog': False,
                        'parameters': omitfile
                    }
                    module_output = self._execute_module(
                        module_name='ibmi_object_save',
                        module_args=module_args)

                save_result = module_output
                rc = save_result['rc']
                if rc != 0 or ('CPC3708' in save_result['stdout']):
                    result['msg'] = 'Create SAVF failed. See stderr or stdout for more information.'
                    result['failed'] = True
                    result['stderr'] = save_result['stderr_lines']
                    result['stdout'] = save_result['stdout_lines']
                    return result
                created = True

            source = savf_path
            # ifs_dir (the IFS staging directory) is defined earlier in the full plugin source
            commandmk = 'mkdir %s' % ifs_dir
            command = 'cp %s %s' % (savf_path, ifs_dir)

            module_output = self._execute_module(
                module_name='command', module_args={'_raw_params': commandmk})
            save_result = module_output
            rc = save_result['rc']
            if rc != 0 and ('exists' not in save_result['stderr']):
                result['msg'] = save_result['msg']
                result['failed'] = True
                result['stderr'] = save_result['stderr_lines']
                return result
            module_output = self._execute_module(
                module_name='command', module_args={'_raw_params': command})
            save_result = module_output
            rc = save_result['rc']
            if rc != 0:
                result['msg'] = save_result['msg']
                result['failed'] = True
                result['stderr'] = save_result['stderr_lines']
                result['stdout'] = save_result['stdout_lines']
                return result
            ifs_created = True

            source = '%s/%s' % (ifs_dir, os.path.basename(savf_path))

            if not isinstance(source, string_types):
                result['msg'] = "Invalid type supplied for source option, it must be a string"
                result['failed'] = True
                return result
            source = self._connection._shell.join_path(source)
            source = self._remote_expand_user(source)
            remote_checksum = None
            if not self._connection.become:
                # calculate checksum for the remote file, don't bother if using become as slurp will be used
                # Force remote_checksum to follow symlinks because fetch always follows symlinks
                remote_checksum = self._remote_checksum(source,
                                                        all_vars=task_vars,
                                                        follow=True)
            # use slurp if permissions are lacking or privilege escalation is needed
            remote_data = None
            if remote_checksum in ('1', '2', None):
                slurpres = self._execute_module(module_name='slurp',
                                                module_args=dict(src=source),
                                                task_vars=task_vars)
                if slurpres.get('failed'):
                    if (slurpres.get('msg').startswith('file not found')
                            or remote_checksum == '1'):
                        result['msg'] = "the remote file does not exist, not transferring"
                        result['file'] = source
                        result['changed'] = False
                    else:
                        result.update(slurpres)
                    return result
                else:
                    if slurpres['encoding'] == 'base64':
                        remote_data = base64.b64decode(slurpres['content'])
                    if remote_data is not None:
                        remote_checksum = checksum_s(remote_data)
                    # the source path may have been expanded on the
                    # target system, so we compare it here and use the
                    # expanded version if it's different
                    remote_source = slurpres.get('source')
                    if remote_source and remote_source != source:
                        source = remote_source

            # calculate the destination name
            if os.path.sep not in self._connection._shell.join_path('a', ''):
                source = self._connection._shell._unquote(source)
                qsys_source = self._connection._shell._unquote(savf_path)
                source_local = qsys_source.replace('\\', '/')
            else:
                source_local = savf_path

            dest = os.path.expanduser(dest)
            if flat:
                if not dest.startswith("/"):
                    # if dest does not start with "/", we'll assume a relative path
                    dest = self._loader.path_dwim(dest)
                base = os.path.basename(source_local)
                dest = os.path.join(dest, base)
            else:
                # files are saved in dest dir, with a subdir for each host, then the filename
                if 'inventory_hostname' in task_vars:
                    target_name = task_vars['inventory_hostname']
                else:
                    target_name = self._play_context.remote_addr
                dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name,
                                     source_local)

            dest = dest.replace("//", "/")

            if remote_checksum in ('0', '1', '2', '3', '4', '5'):
                result['changed'] = False
                result['file'] = source
                if remote_checksum == '0':
                    result['msg'] = "unable to calculate the checksum of the remote file"
                elif remote_checksum == '1':
                    result['msg'] = "the remote file does not exist"
                elif remote_checksum == '2':
                    result['msg'] = "no read permission on remote file"
                elif remote_checksum == '3':
                    result['msg'] = "remote file is a directory, fetch cannot work on directories"
                elif remote_checksum == '4':
                    result['msg'] = "python isn't present on the system.  Unable to compute checksum"
                elif remote_checksum == '5':
                    result['msg'] = "stdlib json was not found on the remote machine. Only the raw module can work without it installed"

                result['failed'] = True
                return result

            # calculate checksum for the local file
            local_checksum = checksum(dest)
            if remote_checksum != local_checksum:
                # create the containing directories, if needed
                makedirs_safe(os.path.dirname(dest))

                # fetch the file and check for changes
                if remote_data is None:
                    self._connection.fetch_file(source, dest)
                else:
                    try:
                        with open(to_bytes(dest, errors='surrogate_or_strict'),
                                  'wb') as f:
                            f.write(remote_data)
                    except (IOError, OSError) as e:
                        raise AnsibleError("Failed to fetch the file: %s" % e)
                new_checksum = secure_hash(dest)
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    new_md5 = md5(dest)
                except ValueError:
                    new_md5 = None

                if validate_checksum and new_checksum != remote_checksum:
                    result.update(
                        dict(failed=True,
                             md5sum=new_md5,
                             msg="checksum mismatch",
                             file=savf,
                             dest=dest,
                             remote_md5sum=None,
                             checksum=new_checksum,
                             remote_checksum=remote_checksum))
                else:
                    endd = datetime.datetime.now()
                    delta = endd - startd
                    if (created is True and backup is True) or is_savf is True:
                        savf = savf_path
                    result['msg'] += " File is renewed on local."
                    result.update({
                        'changed': True,
                        'md5sum': new_md5,
                        'dest': dest,
                        'remote_md5sum': None,
                        'checksum': new_checksum,
                        'remote_checksum': remote_checksum,
                        'delta': str(delta),
                        'file': savf
                    })
            else:
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    local_md5 = md5(dest)
                except ValueError:
                    local_md5 = None
                endd = datetime.datetime.now()
                delta = endd - startd
                if (created is True and backup is True) or is_savf is True:
                    savf = savf_path
                result.update(
                    dict(changed=False,
                         md5sum=local_md5,
                         file=savf,
                         delta=str(delta),
                         dest=dest,
                         checksum=local_checksum))

        except Exception as e:
            result['msg'] += "%s" % to_text(e)
            result['failed'] = True
            return result
        finally:
            if ((backup is False and is_savf is False)
                    or result['failed'] is True) and created is True:
                cmd = 'QSYS/DLTOBJ OBJ(%s/%s) OBJTYPE(*FILE)' % (lib_name,
                                                                 savf_name)
                module_output = self._execute_module(
                    module_name='ibmi_cl_command', module_args={'cmd': cmd})
                save_result = module_output
                rc = save_result['rc']
                if rc != 0 and ('CPF2105' not in save_result['stderr']):
                    result['msg'] += "Failed to delete SAVF on remote"
            if ifs_created is True:
                cmd = 'rm %s/%s' % (ifs_dir, os.path.basename(savf_path))
                try:
                    module_output = self._execute_module(
                        module_name='command',
                        module_args={'_raw_params': cmd})
                    save_result = module_output
                    rc = save_result['rc']
                    if rc != 0:
                        result['msg'] += "Failed to delete IFS on remote"
                except Exception as e:
                    result['msg'] += "an exception happened when deleting the IFS file. error: %s" % to_text(e)

            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
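
Unlike most examples here, the IBM i plugin coerces its flags with strict=True, so misspelled values fail fast rather than silently defaulting; for instance:

from ansible.module_utils.parsing.convert_bool import boolean

print(boolean('on', strict=True))   # True
try:
    boolean('maybe', strict=True)
except TypeError as e:
    print('rejected:', e)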
Example No. 31
    def run(self, tmp=None, task_vars=None):
        ''' handler for fetch operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        try:
            if self._play_context.check_mode:
                result['skipped'] = True
                result['msg'] = 'check mode not (yet) supported for this module'
                return result

            source = self._task.args.get('src', None)
            dest = self._task.args.get('dest', None)
            flat = boolean(self._task.args.get('flat'), strict=False)
            fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
            validate_checksum = boolean(self._task.args.get('validate_checksum',
                                                            self._task.args.get('validate_md5', True)),
                                        strict=False)

            # validate source and dest are strings FIXME: use basic.py and module specs
            if not isinstance(source, string_types):
                result['msg'] = "Invalid type supplied for source option, it must be a string"

            if not isinstance(dest, string_types):
                result['msg'] = "Invalid type supplied for dest option, it must be a string"

            # validate_md5 is the deprecated way to specify validate_checksum
            if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
                result['msg'] = "validate_checksum and validate_md5 cannot both be specified"

            if 'validate_md5' in self._task.args:
                display.deprecated('Use validate_checksum instead of validate_md5', version='2.8')

            if source is None or dest is None:
                result['msg'] = "src and dest are required"

            if result.get('msg'):
                result['failed'] = True
                return result

            source = self._connection._shell.join_path(source)
            source = self._remote_expand_user(source)

            remote_checksum = None
            if not self._play_context.become:
                # calculate checksum for the remote file, don't bother if using become as slurp will be used
                # Force remote_checksum to follow symlinks because fetch always follows symlinks
                remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)

            # use slurp if permissions are lacking or privilege escalation is needed
            remote_data = None
            if remote_checksum in ('1', '2', None):
                slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars)
                if slurpres.get('failed'):
                    if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
                        result['msg'] = "the remote file does not exist, not transferring, ignored"
                        result['file'] = source
                        result['changed'] = False
                    else:
                        result.update(slurpres)
                    return result
                else:
                    if slurpres['encoding'] == 'base64':
                        remote_data = base64.b64decode(slurpres['content'])
                    if remote_data is not None:
                        remote_checksum = checksum_s(remote_data)
                    # the source path may have been expanded on the
                    # target system, so we compare it here and use the
                    # expanded version if it's different
                    remote_source = slurpres.get('source')
                    if remote_source and remote_source != source:
                        source = remote_source

            # calculate the destination name
            if os.path.sep not in self._connection._shell.join_path('a', ''):
                source = self._connection._shell._unquote(source)
                source_local = source.replace('\\', '/')
            else:
                source_local = source

            dest = os.path.expanduser(dest)
            if flat:
                if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
                    result['msg'] = "dest is an existing directory, use a trailing slash if you want to fetch src into that directory"
                    result['file'] = dest
                    result['failed'] = True
                    return result
                if dest.endswith(os.sep):
                    # if the path ends with "/", we'll use the source filename as the
                    # destination filename
                    base = os.path.basename(source_local)
                    dest = os.path.join(dest, base)
                if not dest.startswith("/"):
                    # if dest does not start with "/", we'll assume a relative path
                    dest = self._loader.path_dwim(dest)
            else:
                # files are saved in dest dir, with a subdir for each host, then the filename
                if 'inventory_hostname' in task_vars:
                    target_name = task_vars['inventory_hostname']
                else:
                    target_name = self._play_context.remote_addr
                dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

            dest = dest.replace("//", "/")

            if remote_checksum in ('0', '1', '2', '3', '4', '5'):
                result['changed'] = False
                result['file'] = source
                if remote_checksum == '0':
                    result['msg'] = "unable to calculate the checksum of the remote file"
                elif remote_checksum == '1':
                    result['msg'] = "the remote file does not exist"
                elif remote_checksum == '2':
                    result['msg'] = "no read permission on remote file"
                elif remote_checksum == '3':
                    result['msg'] = "remote file is a directory, fetch cannot work on directories"
                elif remote_checksum == '4':
                    result['msg'] = "python isn't present on the system.  Unable to compute checksum"
                elif remote_checksum == '5':
                    result['msg'] = "stdlib json or simplejson was not found on the remote machine. Only the raw module can work without those installed"
                # Historically, these don't fail because you may want to transfer
                # a log file that possibly MAY exist but keep going to fetch other
                # log files. Today, this is better achieved by adding
                # ignore_errors or failed_when to the task.  Control the behaviour
                # via fail_on_missing
                if fail_on_missing:
                    result['failed'] = True
                    del result['changed']
                else:
                    result['msg'] += ", not transferring, ignored"
                return result

            # calculate checksum for the local file
            local_checksum = checksum(dest)

            if remote_checksum != local_checksum:
                # create the containing directories, if needed
                makedirs_safe(os.path.dirname(dest))

                # fetch the file and check for changes
                if remote_data is None:
                    self._connection.fetch_file(source, dest)
                else:
                    try:
                        with open(to_bytes(dest, errors='surrogate_or_strict'), 'wb') as f:
                            f.write(remote_data)
                    except (IOError, OSError) as e:
                        raise AnsibleError("Failed to fetch the file: %s" % e)
                new_checksum = secure_hash(dest)
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    new_md5 = md5(dest)
                except ValueError:
                    new_md5 = None

                if validate_checksum and new_checksum != remote_checksum:
                    result.update(dict(failed=True, md5sum=new_md5,
                                       msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
                                       checksum=new_checksum, remote_checksum=remote_checksum))
                else:
                    result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
                                   'remote_md5sum': None, 'checksum': new_checksum,
                                   'remote_checksum': remote_checksum})
            else:
                # For backwards compatibility. We'll return None on FIPS enabled systems
                try:
                    local_md5 = md5(dest)
                except ValueError:
                    local_md5 = None
                result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))

        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)

        return result
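
For reference, the flat/per-host destination rules above can be reproduced standalone. A minimal sketch for a POSIX control node; fetch_dest and the sample paths are invented for illustration, and the relative-path and dest-is-a-directory branches are omitted:

import os

def fetch_dest(dest, source_local, flat, hostname):
    """Mirror the destination-path rules of the fetch handler above."""
    dest = os.path.expanduser(dest)
    if flat:
        if dest.endswith(os.sep):
            # trailing slash: reuse the source file name
            dest = os.path.join(dest, os.path.basename(source_local))
    else:
        # one subdirectory per host, then the full source path
        dest = "%s/%s/%s" % (dest, hostname, source_local)
    return dest.replace("//", "/")

print(fetch_dest("/tmp/fetched/", "/var/log/syslog", True, "web01"))   # /tmp/fetched/syslog
print(fetch_dest("/tmp/fetched", "/var/log/syslog", False, "web01"))   # /tmp/fetched/web01/var/log/syslog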
Exemplo n.º 32
0
    def run(self):

        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.
        '''

        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
                # FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))

                if self._tqm is None:  # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []
                else:
                    # make sure the tqm has callbacks loaded
                    self._tqm.load_callbacks()
                    self._tqm.send_callback('v2_playbook_on_start', pb)

                i = 1
                plays = pb.get_plays()
                display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))

                for play in plays:
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    if play.vars_prompt:
                        for var in play.vars_prompt:
                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = boolean(var.get("private", True))
                            confirm = boolean(var.get("confirm", False))
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)

                            if vname not in self._variable_manager.extra_vars:
                                if self._tqm:
                                    self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                    play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                else:  # we are either in --list-<option> or syntax check
                                    play.vars[vname] = default

                    # Create a temporary copy of the play here, so we can run post_validate
                    # on it without the templating changes affecting the original object.
                    all_vars = self._variable_manager.get_vars(play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    new_play = play.copy()
                    new_play.post_validate(templar)

                    if self._options.syntax:
                        continue

                    if self._tqm is None:
                        # we are just doing a listing
                        entry['plays'].append(new_play)

                    else:
                        self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                        previously_failed = len(self._tqm._failed_hosts)
                        previously_unreachable = len(self._tqm._unreachable_hosts)

                        break_play = False
                        # we are actually running plays
                        batches = self._get_serialized_batches(new_play)
                        if len(batches) == 0:
                            self._tqm.send_callback('v2_playbook_on_play_start', new_play)
                            self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                        for batch in batches:
                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)
                            # and run it...
                            result = self._tqm.run(play=play)

                            # break the play if the result equals the special return code
                            if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
                                result = self._tqm.RUN_FAILED_HOSTS
                                break_play = True

                            # check the number of failures here, to see if they're above the maximum
                            # failure percentage allowed, or if any errors are fatal. If either of those
                            # conditions are met, we break out, otherwise we only break out if the entire
                            # batch failed
                            failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
                                (previously_failed + previously_unreachable)

                            if len(batch) == failed_hosts_count:
                                break_play = True
                                break

                            # update the previous counts so they don't accumulate incorrectly
                            # over multiple serial batches
                            previously_failed += len(self._tqm._failed_hosts) - previously_failed
                            previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable

                            # save the unreachable hosts from this batch
                            self._unreachable_hosts.update(self._tqm._unreachable_hosts)

                        if break_play:
                            break

                    i = i + 1  # per play

                if entry:
                    entrylist.append(entry)  # per playbook

                # send the stats callback for this playbook
                if self._tqm is not None:
                    if C.RETRY_FILES_ENABLED:
                        retries = set(self._tqm._failed_hosts.keys())
                        retries.update(self._tqm._unreachable_hosts.keys())
                        retries = sorted(retries)
                        if len(retries) > 0:
                            if C.RETRY_FILES_SAVE_PATH:
                                basedir = C.RETRY_FILES_SAVE_PATH
                            elif playbook_path:
                                basedir = os.path.dirname(os.path.abspath(playbook_path))
                            else:
                                basedir = '~/'

                            (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                            filename = os.path.join(basedir, "%s.retry" % retry_name)
                            if self._generate_retry_inventory(filename, retries):
                                display.display("\tto retry, use: --limit @%s\n" % filename)

                    self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

                # if the last result wasn't zero, break out of the playbook file name loop
                if result != 0:
                    break

            if entrylist:
                return entrylist

        finally:
            if self._tqm is not None:
                self._tqm.cleanup()
            if self._loader:
                self._loader.cleanup_all_tmp_files()

        if self._options.syntax:
            display.display("No issues encountered")
            return result

        return result
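
The batch accounting above decides when a serialized play stops early: the play breaks only when every host in the current batch failed or became unreachable. A toy reproduction (the host names and the run_batch callback are invented; max_fail_percentage and the RUN_FAILED_BREAK_PLAY flag are not modelled):

def run_batches(batches, run_batch):
    """run_batch(batch) returns the set of hosts that failed in that batch."""
    failed = set()
    for batch in batches:
        previously_failed = len(failed)
        failed |= run_batch(batch)
        # stop the play only if the entire batch failed
        if len(batch) == len(failed) - previously_failed:
            break
    return failed

# serial: 2 over four hosts; the second batch fails completely and ends the play
print(run_batches([["h1", "h2"], ["h3", "h4"]],
                  lambda b: set(b) if "h3" in b else set()))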
Exemplo n.º 33
0
    def run(self, tmp=None, task_vars=None):
        ''' handler for unarchive operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
        creates = self._task.args.get('creates', None)
        decrypt = self._task.args.get('decrypt', True)

        try:
            # "copy" is deprecated in favor of "remote_src".
            if 'copy' in self._task.args:
                # They are mutually exclusive.
                if 'remote_src' in self._task.args:
                    raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
                # We will take the information from copy and store it in
                # the remote_src var to use later in this file.
                self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)

            if source is None or dest is None:
                raise AnsibleActionFail("src (or content) and dest are required")

            if creates:
                # do not run the command if the line contains creates=filename
                # and the filename already exists. This allows idempotence
                # of command executions.
                creates = self._remote_expand_user(creates)
                if self._remote_file_exists(creates):
                    raise AnsibleActionSkip("skipped, since %s exists" % creates)

            dest = self._remote_expand_user(dest)  # CCTODO: Fix path for Windows hosts.
            source = os.path.expanduser(source)

            if not remote_src:
                try:
                    source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
                except AnsibleError as e:
                    raise AnsibleActionFail(to_text(e))

            try:
                remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

            if not remote_stat['exists'] or not remote_stat['isdir']:
                raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)

            if not remote_src:
                # transfer the file to a remote tmp location
                tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
                self._transfer_file(source, tmp_src)

            # handle diff mode client side
            # handle check mode client side

            if not remote_src:
                # fix file permissions when the copy is done as a different user
                self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
                # Build temporary module_args.
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        src=tmp_src,
                        original_basename=os.path.basename(source),
                    ),
                )

            else:
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        original_basename=os.path.basename(source),
                    ),
                )

            # remove action plugin only key
            for key in ('decrypt',):
                if key in new_module_args:
                    del new_module_args[key]

            # execute the unarchive module now, with the updated args
            result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
        except AnsibleAction as e:
            result.update(e.result)
        finally:
            self._remove_tmp_path(self._connection._shell.tmpdir)
        return result
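
The deprecated copy parameter is simply the inverse of remote_src. A quick standalone check of that inversion, assuming the same boolean helper used throughout these examples:

from ansible.module_utils.parsing.convert_bool import boolean

args = {'src': 'site.tgz', 'dest': '/opt', 'copy': 'no'}
if 'copy' in args:
    # copy=no meant "do not copy from the controller", i.e. remote_src=yes
    args['remote_src'] = not boolean(args.pop('copy'), strict=False)
print(args)  # {'src': 'site.tgz', 'dest': '/opt', 'remote_src': True}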
Exemplo n.º 34
0
import os

import six

from ansible.module_utils.parsing.convert_bool import boolean

from molecule import api
from molecule import interpolation
from molecule import logger
from molecule import platforms
from molecule import scenario
from molecule import state
from molecule import util
from molecule.dependency import ansible_galaxy
from molecule.dependency import gilt
from molecule.dependency import shell
from molecule.model import schema_v3
from molecule.provisioner import ansible

LOG = logger.get_logger(__name__)
MOLECULE_DEBUG = boolean(os.environ.get("MOLECULE_DEBUG", "False"))
MOLECULE_DIRECTORY = "molecule"
MOLECULE_FILE = "molecule.yml"
MOLECULE_KEEP_STRING = "MOLECULE_"


# https://stackoverflow.com/questions/16017397/injecting-function-call-after-init-with-decorator  # noqa
class NewInitCaller(type):
    """NewInitCaller."""
    def __call__(cls, *args, **kwargs):
        obj = type.__call__(cls, *args, **kwargs)
        obj.after_init()
        return obj


@six.add_metaclass(NewInitCaller)
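
The decorated class itself is cut off in this excerpt. As a hedged illustration of what the metaclass provides (the Config class below is invented), any class built through NewInitCaller gets its after_init hook run automatically once __init__ finishes:

@six.add_metaclass(NewInitCaller)
class Config(object):
    def __init__(self, path):
        self.path = path
        self.validated = False

    def after_init(self):
        # runs automatically right after construction
        self.validated = True

cfg = Config("molecule.yml")
print(cfg.validated)  # True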
Exemplo n.º 35
0
    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        source = self._task.args.get('src', None)
        content = self._task.args.get('content', None)
        dest = self._task.args.get('dest', None)
        remote_src = boolean(self._task.args.get('remote_src', False),
                             strict=False)
        local_follow = boolean(self._task.args.get('local_follow', False),
                               strict=False)
        force = boolean(self._task.args.get('force', True), strict=False)

        result['src'] = source
        result['dest'] = dest

        result['failed'] = True
        if (source is None and content is None) or dest is None:
            result['msg'] = "src (or content) and dest are required"
        elif source is not None and content is not None:
            result['msg'] = "src and content are mutually exclusive"
        elif content is not None and dest is not None and (
                dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
            result['msg'] = "dest must be a file if content is defined"
        else:
            del result['failed']

        if result.get('failed'):
            return result

        # If content is defined make a temp file and write the content into it
        content_tempfile = None
        if content is not None:
            try:
                # if content comes to us as a dict it should be decoded json.
                # We need to encode it back into a string and write it out
                if isinstance(content, dict) or isinstance(content, list):
                    content_tempfile = self._create_content_tempfile(
                        json.dumps(content))
                else:
                    content_tempfile = self._create_content_tempfile(content)
                source = content_tempfile
            except Exception as err:
                result['failed'] = True
                result['msg'] = "could not write content temp file: %s" % to_native(err)
                return result
        # all actions should occur on the remote server, run win_copy module
        elif remote_src:
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(mode="remote", dest=dest, src=source, force=force))
            new_module_args.pop('content', None)
            result.update(
                self._execute_module(module_args=new_module_args,
                                     task_vars=task_vars))
            return result
        # find_needle returns a path that may not have a trailing slash on a
        # directory so we need to find that out first and append at the end
        else:
            trailing_slash = source.endswith(os.path.sep)
            try:
                # find in expected paths
                source = self._find_needle('files', source)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if trailing_slash != source.endswith(os.path.sep):
                if source[-1] == os.path.sep:
                    source = source[:-1]
                else:
                    source = source + os.path.sep

        # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
        source_files = {'files': [], 'directories': [], 'symlinks': []}

        # If source is a directory populate our list else source is a file and translate it to a tuple.
        if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
            result['operation'] = 'folder_copy'

            # Get a list of the files we want to replicate on the remote side
            source_files = _walk_dirs(source,
                                      self._loader,
                                      local_follow=local_follow,
                                      trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
                                      checksum_check=force)

            # If it's recursive copy, destination is always a dir,
            # explicitly mark it so (note - win_copy module relies on this).
            if not self._connection._shell.path_has_trailing_slash(dest):
                dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)

            check_dest = dest
        # Source is a file, add details to source_files dict
        else:
            result['operation'] = 'file_copy'

            # If the local file does not exist, get_real_file() raises AnsibleFileNotFound
            try:
                source_full = self._loader.get_real_file(source, decrypt=True)
            except AnsibleFileNotFound as e:
                result['failed'] = True
                result['msg'] = "could not find src=%s, %s" % (source_full,
                                                               to_text(e))
                return result

            original_basename = os.path.basename(source)
            result['original_basename'] = original_basename

            # check if dest ends with / or \ and append source filename to dest
            if self._connection._shell.path_has_trailing_slash(dest):
                check_dest = dest
                filename = original_basename
                result['dest'] = self._connection._shell.join_path(
                    dest, filename)
            else:
                # replace \\ with / so we can use os.path to get the filename or dirname
                unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
                filename = os.path.basename(unix_path)
                check_dest = os.path.dirname(unix_path)

            file_checksum = _get_local_checksum(force, source_full)
            source_files['files'].append(
                dict(src=source_full, dest=filename, checksum=file_checksum))
            result['checksum'] = file_checksum
            result['size'] = os.path.getsize(
                to_bytes(source_full, errors='surrogate_or_strict'))

        # find out the files/directories/symlinks that we need to copy to the server
        query_args = self._task.args.copy()
        query_args.update(
            dict(mode="query",
                 dest=check_dest,
                 force=force,
                 files=source_files['files'],
                 directories=source_files['directories'],
                 symlinks=source_files['symlinks']))
        # src is not required for query, will fail path validation if src has unix allowed chars
        query_args.pop('src', None)

        query_args.pop('content', None)
        query_return = self._execute_module(module_args=query_args,
                                            task_vars=task_vars)

        if query_return.get('failed') is True:
            result.update(query_return)
            return result

        if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
            # we only need to copy 1 file, don't mess around with zips
            file_src = query_return['files'][0]['src']
            file_dest = query_return['files'][0]['dest']
            copy_result = self._copy_single_file(file_src, dest, file_dest,
                                                 task_vars)

            result['changed'] = True
            if copy_result.get('failed') is True:
                result['failed'] = True
                result['msg'] = "failed to copy file %s: %s" % (
                    file_src, copy_result['msg'])
        elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
            # either multiple files or directories need to be copied, compress
            # to a zip and 'explode' the zip on the server
            # TODO: handle symlinks
            result.update(
                self._copy_zip_file(dest, source_files['files'],
                                    source_files['directories'], task_vars))
            result['changed'] = True
        else:
            # no operations need to occur
            result['failed'] = False
            result['changed'] = False

        # remove the content temp file if it was created
        self._remove_tempfile_if_content_defined(content, content_tempfile)
        return result
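
_create_content_tempfile is referenced above but not shown. A plausible minimal equivalent (an assumption, not the plugin's actual code) writes the rendered content to a local temporary file and returns its path:

import os
import tempfile

def create_content_tempfile(content):
    """Write content to a local temp file and return its path."""
    fd, content_tempfile = tempfile.mkstemp()
    data = content.encode('utf-8') if isinstance(content, str) else content
    with os.fdopen(fd, 'wb') as f:
        f.write(data)
    return content_tempfile

path = create_content_tempfile('hello\n')
print(open(path).read())  # hello
os.unlink(path)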
Exemplo n.º 36
0
    def run(self, tmp=None, task_vars=None):
        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        category_names = self._task.args.get('category_names', [
            'CriticalUpdates',
            'SecurityUpdates',
            'UpdateRollups',
        ])
        state = self._task.args.get('state', 'installed')
        reboot = self._task.args.get('reboot', False)
        reboot_timeout = self._task.args.get('reboot_timeout',
                                             self.DEFAULT_REBOOT_TIMEOUT)

        # Validate the options
        try:
            self._validate_categories(category_names)
        except AnsibleError as exc:
            result['failed'] = True
            result['msg'] = to_text(exc)
            return result

        if state not in ['installed', 'searched']:
            result['failed'] = True
            result['msg'] = "state must be either installed or searched"
            return result

        try:
            reboot = boolean(reboot)
        except TypeError as exc:
            result['failed'] = True
            result['msg'] = "cannot parse reboot as a boolean: %s" % to_text(
                exc)
            return result

        if not isinstance(reboot_timeout, int):
            result['failed'] = True
            result['msg'] = "reboot_timeout must be an integer"
            return result

        if reboot and self._task.async_val > 0:
            result['failed'] = True
            result['msg'] = "async is not supported for this task when " \
                            "reboot=yes"
            return result

        # Run the module
        new_module_args = self._task.args.copy()
        new_module_args.pop('reboot', None)
        new_module_args.pop('reboot_timeout', None)
        result = self._run_win_updates(new_module_args, task_vars)

        changed = result['changed']
        updates = result.get('updates', dict())
        filtered_updates = result.get('filtered_updates', dict())
        found_update_count = result.get('found_update_count', 0)
        installed_update_count = result.get('installed_update_count', 0)

        # Handle automatic reboots if the reboot flag is set
        if reboot and state == 'installed' and not \
                self._play_context.check_mode:
            previously_errored = False
            while result['installed_update_count'] > 0 or \
                    result['found_update_count'] > 0 or \
                    result['reboot_required'] is True:
                display.vvv("win_updates: check win_updates results for "
                            "automatic reboot: %s" % json.dumps(result))

                # check if the module failed, break from the loop if it
                # previously failed and return error to the user
                if result.get('failed', False):
                    if previously_errored:
                        break
                    previously_errored = True
                else:
                    previously_errored = False

                reboot_error = None
                # check if a reboot was required before installing the updates
                if result.get('msg', '') == "A reboot is required before " \
                                            "more updates can be installed":
                    reboot_error = "reboot was required before more updates " \
                                   "can be installed"

                if result.get('reboot_required', False):
                    if reboot_error is None:
                        reboot_error = "reboot was required to finalise " \
                                       "update install"
                    try:
                        changed = True
                        self._reboot_server(task_vars, reboot_timeout)
                    except AnsibleError as exc:
                        result['failed'] = True
                        result['msg'] = "Failed to reboot remote host when " \
                                        "%s: %s" \
                                        % (reboot_error, to_text(exc))
                        break

                result.pop('msg', None)
                # rerun the win_updates module after the reboot is complete
                result = self._run_win_updates(new_module_args, task_vars)

                result_updates = result.get('updates', dict())
                result_filtered_updates = result.get('filtered_updates',
                                                     dict())
                updates = self._merge_dict(updates, result_updates)
                filtered_updates = self._merge_dict(filtered_updates,
                                                    result_filtered_updates)
                found_update_count += result.get('found_update_count', 0)
                installed_update_count += result.get('installed_update_count',
                                                     0)
                if result['changed']:
                    changed = True

        # finally create the return dict based on the aggregated execution
        # values if we are not in async
        if self._task.async_val == 0:
            result['changed'] = changed
            result['updates'] = updates
            result['filtered_updates'] = filtered_updates
            result['found_update_count'] = found_update_count
            result['installed_update_count'] = installed_update_count

        return result
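
_merge_dict is referenced above but not shown. The aggregation only needs the update results from successive runs folded together, so a shallow merge like the sketch below would suffice (an assumption; the real helper may merge nested keys recursively):

def merge_dict(a, b):
    """Return a copy of a updated with b (later runs win on key clashes)."""
    merged = dict(a)
    merged.update(b)
    return merged

updates = merge_dict({'KB1': {'installed': True}},
                     {'KB2': {'installed': False}})
print(sorted(updates))  # ['KB1', 'KB2']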
Exemplo n.º 37
0
def run_module():
    # ********************************************************** #
    #                Module initialization                       #
    # ********************************************************** #
    module = AnsibleModule(argument_spec=dict(
        src=dict(required=True, type="str"),
        dest=dict(required=True, type="path"),
        fail_on_missing=dict(required=False, default=True, type="bool"),
        flat=dict(required=False, default=True, type="bool"),
        is_binary=dict(required=False, default=False, type="bool"),
        use_qualifier=dict(required=False, default=False, type="bool"),
        validate_checksum=dict(required=False, default=True, type="bool"),
        encoding=dict(required=False, type="dict"),
        sftp_port=dict(type="int", required=False),
        ignore_sftp_stderr=dict(type="bool", default=False, required=False),
        local_charset=dict(type="str"),
    ))

    src = module.params.get("src")
    if module.params.get("use_qualifier"):
        module.params["src"] = datasets.hlq() + "." + src

    # ********************************************************** #
    #                   Verify parameter validity                #
    # ********************************************************** #

    arg_def = dict(
        src=dict(arg_type="data_set_or_path", required=True),
        dest=dict(arg_type="path", required=True),
        fail_on_missing=dict(arg_type="bool", required=False, default=True),
        is_binary=dict(arg_type="bool", required=False, default=False),
        use_qualifier=dict(arg_type="bool", required=False, default=False),
    )

    if not module.params.get("encoding") and not module.params.get(
            "is_binary"):
        mvs_src = data_set.is_data_set(src)
        remote_charset = encode.Defaults.get_default_system_charset()

        module.params["encoding"] = {
            "from": encode.Defaults.DEFAULT_EBCDIC_MVS_CHARSET
            if mvs_src else remote_charset,
            "to": module.params.get("local_charset"),
        }

    if module.params.get("encoding"):
        module.params.update(
            dict(
                from_encoding=module.params.get("encoding").get("from"),
                to_encoding=module.params.get("encoding").get("to"),
            ))
        arg_def.update(
            dict(
                from_encoding=dict(arg_type="encoding"),
                to_encoding=dict(arg_type="encoding"),
            ))

    fetch_handler = FetchHandler(module)
    try:
        parser = better_arg_parser.BetterArgParser(arg_def)
        parsed_args = parser.parse_args(module.params)
    except ValueError as err:
        module.fail_json(msg="Parameter verification failed", stderr=str(err))
    src = parsed_args.get("src")
    b_src = to_bytes(src)
    fail_on_missing = boolean(parsed_args.get("fail_on_missing"))
    is_binary = boolean(parsed_args.get("is_binary"))
    encoding = module.params.get("encoding")

    # ********************************************************** #
    #  Check for data set existence and determine its type       #
    # ********************************************************** #

    res_args = dict()
    _fetch_member = "(" in src and src.endswith(")")
    ds_name = src if not _fetch_member else src[:src.find("(")]
    try:
        ds_utils = data_set.DataSetUtils(ds_name)
        if not ds_utils.exists():
            if fail_on_missing:
                module.fail_json(msg=("The source '{0}' does not exist or is "
                                      "uncataloged".format(ds_name)))
            module.exit_json(
                note=("Source '{0}' was not found. No data was fetched".format(
                    ds_name)))
        ds_type = ds_utils.ds_type()
        if not ds_type:
            module.fail_json(msg="Unable to determine data set type")

    except Exception as err:
        module.fail_json(msg="Error while gathering data set information",
                         stderr=str(err))

    # ********************************************************** #
    #                  Fetch a sequential data set               #
    # ********************************************************** #

    if ds_type == "PS":
        file_path = fetch_handler._fetch_mvs_data(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    # ********************************************************** #
    #    Fetch a partitioned data set or one of its members      #
    # ********************************************************** #

    elif ds_type == "PO":
        if _fetch_member:
            member_name = src[src.find("(") + 1:src.find(")")]
            if not ds_utils.member_exists(member_name):
                module.fail_json(
                    msg=("The data set member '{0}' was not found inside data "
                         "set '{1}'").format(member_name, ds_name))
            file_path = fetch_handler._fetch_mvs_data(src, is_binary, encoding)
            res_args["remote_path"] = file_path
        else:
            res_args["remote_path"] = fetch_handler._fetch_pdse(
                src, is_binary, encoding)

    # ********************************************************** #
    #                  Fetch a USS file                          #
    # ********************************************************** #

    elif ds_type == "USS":
        if not os.access(b_src, os.R_OK):
            module.fail_json(
                msg="File '{0}' does not have appropriate read permission".format(src))
        file_path = fetch_handler._fetch_uss_file(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    # ********************************************************** #
    #                  Fetch a VSAM data set                     #
    # ********************************************************** #

    elif ds_type == "VSAM":
        file_path = fetch_handler._fetch_vsam(src, is_binary, encoding)
        res_args["remote_path"] = file_path

    res_args["file"] = ds_name
    res_args["ds_type"] = ds_type
    module.exit_json(**res_args)
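
The member-detection logic above splits a PDS(E) member reference into a data set name and a member name. In isolation (the sample data set names are made up):

def split_member(src):
    """Return (data_set_name, member_name_or_None) for an MVS source."""
    if "(" in src and src.endswith(")"):
        return src[:src.find("(")], src[src.find("(") + 1:src.find(")")]
    return src, None

print(split_member("USER.TEST.PDS(MEMBER1)"))  # ('USER.TEST.PDS', 'MEMBER1')
print(split_member("USER.TEST.SEQ"))           # ('USER.TEST.SEQ', None)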
Exemplo n.º 38
0
def ensure_type(value, value_type, origin=None):
    ''' return a configuration variable with casting
    :arg value: The value to ensure correct typing of
    :kwarg value_type: The type of the value.  This can be any of the following strings:
        :boolean: sets the value to a True or False value
        :bool: Same as 'boolean'
        :integer: Sets the value to an integer or raises a ValueError
        :int: Same as 'integer'
        :float: Sets the value to a float or raises a ValueError
        :list: Treats the value as a comma separated list.  Splits the value
            and returns it as a python list.
        :none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
        :tmppath: Create a unique temporary directory inside of the directory
            specified by value and return its path.
        :temppath: Same as 'tmppath'
        :tmp: Same as 'tmppath'
        :pathlist: Treat the value as a typical PATH string.  (On POSIX, this
            means colon separated strings.)  Split the value and then expand
            each part for environment variables and tildes.
        :pathspec: Treat the value as a PATH string. Expands any environment
            variables and tildes in the value.
        :str: Sets the value to string types.
        :string: Same as 'str'
    '''

    errmsg = ''
    basedir = None
    if origin and os.path.isabs(origin) and os.path.exists(to_bytes(origin)):
        basedir = origin

    if value_type:
        value_type = value_type.lower()

    if value is not None:
        if value_type in ('boolean', 'bool'):
            value = boolean(value, strict=False)

        elif value_type in ('integer', 'int'):
            value = int(value)

        elif value_type == 'float':
            value = float(value)

        elif value_type == 'list':
            if isinstance(value, string_types):
                value = [unquote(x.strip()) for x in value.split(',')]
            elif not isinstance(value, Sequence):
                errmsg = 'list'

        elif value_type == 'none':
            if value == "None":
                value = None

            if value is not None:
                errmsg = 'None'

        elif value_type == 'path':
            if isinstance(value, string_types):
                value = resolve_path(value, basedir=basedir)
            else:
                errmsg = 'path'

        elif value_type in ('tmp', 'temppath', 'tmppath'):
            if isinstance(value, string_types):
                value = resolve_path(value, basedir=basedir)
                if not os.path.exists(value):
                    makedirs_safe(value, 0o700)
                prefix = 'ansible-local-%s' % os.getpid()
                value = tempfile.mkdtemp(prefix=prefix, dir=value)
                atexit.register(cleanup_tmp_file, value, warn=True)
            else:
                errmsg = 'temppath'

        elif value_type == 'pathspec':
            if isinstance(value, string_types):
                value = value.split(os.pathsep)

            if isinstance(value, Sequence):
                value = [resolve_path(x, basedir=basedir) for x in value]
            else:
                errmsg = 'pathspec'

        elif value_type == 'pathlist':
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]

            if isinstance(value, Sequence):
                value = [resolve_path(x, basedir=basedir) for x in value]
            else:
                errmsg = 'pathlist'

        elif value_type in ('dict', 'dictionary'):
            if not isinstance(value, Mapping):
                errmsg = 'dictionary'

        elif value_type in ('str', 'string'):
            if isinstance(value, (string_types, AnsibleVaultEncryptedUnicode,
                                  bool, int, float, complex)):
                value = unquote(to_text(value, errors='surrogate_or_strict'))
            else:
                errmsg = 'string'

        # defaults to string type
        elif isinstance(value, (string_types, AnsibleVaultEncryptedUnicode)):
            value = unquote(to_text(value, errors='surrogate_or_strict'))

        if errmsg:
            raise ValueError('Invalid type provided for "%s": %s' %
                             (errmsg, to_native(value)))

    return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
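
Given the function above, typical conversions look like the following (the outputs shown as comments are indicative):

print(ensure_type('yes', 'bool'))      # True, via boolean(..., strict=False)
print(ensure_type('1,2 , 3', 'list'))  # ['1', '2', '3']
print(ensure_type('~/cfg', 'path'))    # e.g. /home/user/cfg, tilde expanded
print(ensure_type(None, 'str'))        # None -- None always passes through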
Exemplo n.º 39
0
def _list_into_cache(regions):
    groups = collections.defaultdict(list)
    hostvars = collections.defaultdict(dict)
    images = {}
    cbs_attachments = collections.defaultdict(dict)

    prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')

    try:
        # Ansible 2.3+
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public', value_type='list')
    except TypeError:
        # Ansible 2.2.x and below
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public', islist=True)
    try:
        try:
            ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                              'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
        except TypeError:
            ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                              'RAX_ACCESS_IP_VERSION', 4, islist=True))
    except Exception:
        ip_versions = [4]
    else:
        ip_versions = [v for v in ip_versions if v in [4, 6]]
        if not ip_versions:
            ip_versions = [4]

    # Go through all the regions looking for servers
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        if cs is None:
            warnings.warn(
                'Connecting to Rackspace region "%s" has caused Pyrax to '
                'return None. Is this a valid region?' % region,
                RuntimeWarning)
            continue
        for server in cs.servers.list():
            # Create a group on region
            groups[region].append(server.name)

            # Check if group metadata key in servers' metadata
            group = server.metadata.get('group')
            if group:
                groups[group].append(server.name)

            for extra_group in server.metadata.get('groups', '').split(','):
                if extra_group:
                    groups[extra_group].append(server.name)

            # Add host metadata
            for key, value in to_dict(server).items():
                hostvars[server.name][key] = value

            hostvars[server.name]['rax_region'] = region

            for key, value in iteritems(server.metadata):
                groups['%s_%s_%s' % (prefix, key, value)].append(server.name)

            groups['instance-%s' % server.id].append(server.name)
            groups['flavor-%s' % server.flavor['id']].append(server.name)

            # Handle boot from volume
            if not server.image:
                if not cbs_attachments[region]:
                    cbs = pyrax.connect_to_cloud_blockstorage(region)
                    for vol in cbs.list():
                        if boolean(vol.bootable, strict=False):
                            for attachment in vol.attachments:
                                metadata = vol.volume_image_metadata
                                server_id = attachment['server_id']
                                cbs_attachments[region][server_id] = {
                                    'id': metadata['image_id'],
                                    'name': slugify(metadata['image_name'])
                                }
                image = cbs_attachments[region].get(server.id)
                if image:
                    server.image = {'id': image['id']}
                    hostvars[server.name]['rax_image'] = server.image
                    hostvars[server.name]['rax_boot_source'] = 'volume'
                    images[image['id']] = image['name']
            else:
                hostvars[server.name]['rax_boot_source'] = 'local'

            try:
                imagegroup = 'image-%s' % images[server.image['id']]
                groups[imagegroup].append(server.name)
                groups['image-%s' % server.image['id']].append(server.name)
            except KeyError:
                try:
                    image = cs.images.get(server.image['id'])
                except cs.exceptions.NotFound:
                    groups['image-%s' % server.image['id']].append(server.name)
                else:
                    images[image.id] = image.human_id
                    groups['image-%s' % image.human_id].append(server.name)
                    groups['image-%s' % server.image['id']].append(server.name)

            # And finally, add an IP address
            ansible_ssh_host = None
            # use accessIPv[46] instead of looping address for 'public'
            for network_name in networks:
                if ansible_ssh_host:
                    break
                if network_name == 'public':
                    for version_name in ip_versions:
                        if ansible_ssh_host:
                            break
                        if version_name == 6 and server.accessIPv6:
                            ansible_ssh_host = server.accessIPv6
                        elif server.accessIPv4:
                            ansible_ssh_host = server.accessIPv4
                if not ansible_ssh_host:
                    addresses = server.addresses.get(network_name, [])
                    for address in addresses:
                        for version_name in ip_versions:
                            if ansible_ssh_host:
                                break
                            if address.get('version') == version_name:
                                ansible_ssh_host = address.get('addr')
                                break
            if ansible_ssh_host:
                hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host

    if hostvars:
        groups['_meta'] = {'hostvars': hostvars}

    with open(get_cache_file_path(regions), 'w') as cache_file:
        json.dump(groups, cache_file)
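
The group-naming conventions above are easiest to see on a single invented server record; a standalone sketch:

import collections

groups = collections.defaultdict(list)
prefix = 'meta'
server_name = 'web01'
metadata = {'group': 'app', 'groups': 'blue,canary'}

groups[metadata['group']].append(server_name)
for extra_group in metadata.get('groups', '').split(','):
    if extra_group:
        groups[extra_group].append(server_name)
for key, value in metadata.items():
    groups['%s_%s_%s' % (prefix, key, value)].append(server_name)

print(dict(groups))
# {'app': ['web01'], 'blue': ['web01'], 'canary': ['web01'],
#  'meta_group_app': ['web01'], 'meta_groups_blue,canary': ['web01']}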
Exemplo n.º 40
0
    def _copy_file(self, source_full, source_rel, content, content_tempfile,
                   dest, task_vars, tmp, delete_remote_tmp):
        decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
        follow = boolean(self._task.args.get('follow', False), strict=False)
        force = boolean(self._task.args.get('force', 'yes'), strict=False)
        raw = boolean(self._task.args.get('raw', 'no'), strict=False)

        result = {}
        result['diff'] = []

        # If the local file does not exist, get_real_file() raises AnsibleFileNotFound
        try:
            source_full = self._loader.get_real_file(source_full,
                                                     decrypt=decrypt)
        except AnsibleFileNotFound as e:
            result['failed'] = True
            result['msg'] = "could not find src=%s, %s" % (source_full,
                                                           to_text(e))
            self._remove_tmp_path(tmp)
            return result

        # Get the local mode and set if user wanted it preserved
        # https://github.com/ansible/ansible-modules-core/issues/1124
        lmode = None
        if self._task.args.get('mode', None) == 'preserve':
            lmode = '0%03o' % stat.S_IMODE(os.stat(source_full).st_mode)

        # This is kind of optimization - if user told us destination is
        # dir, do path manipulation right away, otherwise we still check
        # for dest being a dir via remote call below.
        if self._connection._shell.path_has_trailing_slash(dest):
            dest_file = self._connection._shell.join_path(dest, source_rel)
        else:
            dest_file = self._connection._shell.join_path(dest)

        # Create a tmp path if missing only if this is not recursive.
        # If this is recursive we already have a tmp path.
        if delete_remote_tmp:
            if tmp is None or "-tmp-" not in tmp:
                tmp = self._make_tmp_path()

        # Attempt to get remote file info
        dest_status = self._execute_remote_stat(dest_file,
                                                all_vars=task_vars,
                                                follow=follow,
                                                tmp=tmp,
                                                checksum=force)

        if dest_status['exists'] and dest_status['isdir']:
            # The dest is a directory.
            if content is not None:
                # If source was defined as content remove the temporary file and fail out.
                self._remove_tempfile_if_content_defined(
                    content, content_tempfile)
                self._remove_tmp_path(tmp)
                result['failed'] = True
                result['msg'] = "can not use content with a dir as dest"
                return result
            else:
                # Append the relative source location to the destination and get remote stats again
                dest_file = self._connection._shell.join_path(dest, source_rel)
                dest_status = self._execute_remote_stat(dest_file,
                                                        all_vars=task_vars,
                                                        follow=follow,
                                                        tmp=tmp,
                                                        checksum=force)

        if dest_status['exists'] and not force:
            # remote_file exists so continue to next iteration.
            return None

        # Generate a hash of the local file.
        local_checksum = checksum(source_full)

        if local_checksum != dest_status['checksum']:
            # The checksums don't match and we will change or error out.

            if self._play_context.diff and not raw:
                result['diff'].append(
                    self._get_diff_data(dest_file, source_full, task_vars))

            if self._play_context.check_mode:
                self._remove_tempfile_if_content_defined(
                    content, content_tempfile)
                result['changed'] = True
                return result

            # Define a remote directory that we will copy the file to.
            tmp_src = self._connection._shell.join_path(tmp, 'source')

            remote_path = None

            if not raw:
                remote_path = self._transfer_file(source_full, tmp_src)
            else:
                self._transfer_file(source_full, dest_file)

            # We have copied the file remotely and no longer require our content_tempfile
            self._remove_tempfile_if_content_defined(content, content_tempfile)
            self._loader.cleanup_tmp_file(source_full)

            # fix file permissions when the copy is done as a different user
            if remote_path:
                self._fixup_perms2((tmp, remote_path))

            if raw:
                # Continue to next iteration if raw is defined.
                return None

            # Run the copy module

            # src and dest here come after original and override them
            # we pass dest only to make sure it includes trailing slash in case of recursive copy
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=tmp_src,
                    dest=dest,
                    original_basename=source_rel,
                ))
            if lmode:
                new_module_args['mode'] = lmode

            # remove action plugin only keys
            for key in ('content', 'decrypt'):
                if key in new_module_args:
                    del new_module_args[key]

            module_return = self._execute_module(
                module_name='copy',
                module_args=new_module_args,
                task_vars=task_vars,
                tmp=tmp,
                delete_remote_tmp=delete_remote_tmp)

        else:
            # no need to transfer the file, already correct hash, but still need to call
            # the file module in case we want to change attributes
            self._remove_tempfile_if_content_defined(content, content_tempfile)
            self._loader.cleanup_tmp_file(source_full)

            if raw:
                # Continue to next iteration if raw is defined.
                self._remove_tmp_path(tmp)
                return None

            # Fix for https://github.com/ansible/ansible-modules-core/issues/1568.
            # If checksums match, and follow = True, find out if 'dest' is a link. If so,
            # change it to point to the source of the link.
            if follow:
                dest_status_nofollow = self._execute_remote_stat(
                    dest_file, all_vars=task_vars, tmp=tmp, follow=False)
                if dest_status_nofollow['islnk'] and 'lnk_source' in dest_status_nofollow:
                    dest = dest_status_nofollow['lnk_source']

            # Build temporary module_args.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(src=source_rel, dest=dest, original_basename=source_rel))
            if lmode:
                new_module_args['mode'] = lmode

            # Execute the file module.
            module_return = self._execute_module(
                module_name='file',
                module_args=new_module_args,
                task_vars=task_vars,
                tmp=tmp,
                delete_remote_tmp=delete_remote_tmp)

        if not module_return.get('checksum'):
            module_return['checksum'] = local_checksum

        result.update(module_return)
        return result
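
The branch structure above is the core of _copy_file: when content differs, the file is transferred and the copy module finishes the job; when the checksums already match, only the file module runs so attribute changes (mode, owner, links) are still applied. A standalone sketch of that decision, with hypothetical checksum inputs (not part of the plugin):

def module_for(local_checksum, remote_checksum):
    # differing content -> transfer + 'copy'; identical content -> 'file'
    return 'copy' if local_checksum != remote_checksum else 'file'

assert module_for('abc123', 'abc123') == 'file'
assert module_for('abc123', 'def456') == 'copy'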
Example No. 41
    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        source = self._task.args.get('src', None)
        content = self._task.args.get('content', None)
        dest = self._task.args.get('dest', None)
        remote_src = boolean(self._task.args.get('remote_src', False),
                             strict=False)
        local_follow = boolean(self._task.args.get('local_follow', True),
                               strict=False)

        result['failed'] = True
        if not source and content is None:
            result['msg'] = 'src (or content) is required'
        elif not dest:
            result['msg'] = 'dest is required'
        elif source and content is not None:
            result['msg'] = 'src and content are mutually exclusive'
        elif content is not None and dest is not None and dest.endswith("/"):
            result['msg'] = "can not use content with a dir as dest"
        else:
            del result['failed']

        if result.get('failed'):
            return result

        # Define content_tempfile in case we set it after finding content populated.
        content_tempfile = None

        # If content is defined make a temp file and write the content into it.
        if content is not None:
            try:
                # If content comes to us as a dict it should be decoded json.
                # We need to encode it back into a string to write it out.
                if isinstance(content, dict) or isinstance(content, list):
                    content_tempfile = self._create_content_tempfile(
                        json.dumps(content))
                else:
                    content_tempfile = self._create_content_tempfile(content)
                source = content_tempfile
            except Exception as err:
                result['failed'] = True
                result['msg'] = "could not write content temp file: %s" % to_native(err)
                return result

        # when remote_src is set all the work happens on the remote side, so
        # hand the task straight off to the copy module
        elif remote_src:
            result.update(self._execute_module(task_vars=task_vars))
            return result
        else:
            # find_needle returns a path that may not have a trailing slash on
            # a directory, so we need to detect that now (we use it just like
            # rsync does to decide whether to copy the directory itself or
            # only the files inside it)
            trailing_slash = source.endswith(os.path.sep)
            try:
                # find in expected paths
                source = self._find_needle('files', source)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if trailing_slash != source.endswith(os.path.sep):
                if source[-1] == os.path.sep:
                    source = source[:-1]
                else:
                    source = source + os.path.sep

        # Source files to copy, keyed by type; 'files' entries are tuples of
        # (full_path, relative_path)
        source_files = {'files': [], 'directories': [], 'symlinks': []}

        # If source is a directory populate our list else source is a file and translate it to a tuple.
        if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
            # Get a list of the files we want to replicate on the remote side
            source_files = _walk_dirs(
                source,
                local_follow=local_follow,
                trailing_slash_detector=self._connection._shell.path_has_trailing_slash)

            # If it's recursive copy, destination is always a dir,
            # explicitly mark it so (note - copy module relies on this).
            if not self._connection._shell.path_has_trailing_slash(dest):
                dest = self._connection._shell.join_path(dest, '')
            # FIXME: Can we optimize cases where there's only one file, no
            # symlinks and any number of directories?  In the original code,
            # empty directories are not copied....
        else:
            source_files['files'] = [(source, os.path.basename(source))]

        changed = False
        module_return = dict(changed=False)

        # Tracks whether we actually executed a module; used to cut down on
        # command calls when the copy is not recursive.
        module_executed = False

        # Optimization: Can delete remote_tmp on the first call if we're only
        # copying a single file.  Otherwise we keep the remote_tmp until it
        # is no longer needed.
        delete_remote_tmp = False
        if sum(len(f) for f in source_files.values()) == 1:
            # Tell _execute_module to delete the file if there is one file.
            delete_remote_tmp = True

        # If this is a recursive copy, create a shared tmp path up front; the
        # one _execute_module would create is made too late to be shared.
        if not delete_remote_tmp:
            if tmp is None or "-tmp-" not in tmp:
                tmp = self._make_tmp_path()

        # expand any user home dir specifier
        dest = self._remote_expand_user(dest)

        implicit_directories = set()
        for source_full, source_rel in source_files['files']:
            # copy files over.  This happens first as directories that have
            # a file do not need to be created later
            module_return = self._copy_file(source_full, source_rel, content,
                                            content_tempfile, dest, task_vars,
                                            tmp, delete_remote_tmp)
            if module_return is None:
                continue

            paths = os.path.split(source_rel)
            dir_path = ''
            for dir_component in paths:
                dir_path = os.path.join(dir_path, dir_component)
                implicit_directories.add(dir_path)
            if 'diff' in result and not result['diff']:
                del result['diff']
            module_executed = True
            changed = changed or module_return.get('changed', False)

        for src, dest_path in source_files['directories']:
            # Find directories that are leaves as they might not have been
            # created yet.
            if dest_path in implicit_directories:
                continue

            # Use file module to create these
            new_module_args = self._get_file_args()
            new_module_args['path'] = os.path.join(dest, dest_path)
            new_module_args['state'] = 'directory'
            new_module_args['mode'] = self._task.args.get(
                'directory_mode', None)

            module_return = self._execute_module(
                module_name='file',
                module_args=new_module_args,
                task_vars=task_vars,
                tmp=tmp,
                delete_remote_tmp=delete_remote_tmp)
            module_executed = True
            changed = changed or module_return.get('changed', False)

        for target_path, dest_path in source_files['symlinks']:
            # Copy symlinks over
            new_module_args = self._get_file_args()
            new_module_args['path'] = os.path.join(dest, dest_path)
            new_module_args['src'] = target_path
            new_module_args['state'] = 'link'
            new_module_args['force'] = True

            module_return = self._execute_module(
                module_name='file',
                module_args=new_module_args,
                task_vars=task_vars,
                tmp=tmp,
                delete_remote_tmp=delete_remote_tmp)
            module_executed = True

            if module_return.get('failed'):
                result.update(module_return)
                if not delete_remote_tmp:
                    self._remove_tmp_path(tmp)
                return result

            changed = changed or module_return.get('changed', False)

            # the file module returns the file path as 'path', but
            # the copy module uses 'dest', so add it if it's not there
            if 'path' in module_return and 'dest' not in module_return:
                module_return['dest'] = module_return['path']

        # Delete tmp path if we were recursive or if we did not execute a module.
        if not delete_remote_tmp or (delete_remote_tmp
                                     and not module_executed):
            self._remove_tmp_path(tmp)

        if module_executed and len(source_files['files']) == 1:
            result.update(module_return)
        else:
            result.update(dict(dest=dest, src=source, changed=changed))

        return result
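
For reference, the source_files structure the loops above iterate over keeps three parallel lists. A hypothetical example of what _walk_dirs() might return for a small tree (paths invented for illustration):

source_files = {
    # (full_path, relative_path) per file
    'files': [('/play/files/app/conf.yml', 'app/conf.yml')],
    # leaf directories that may still need creating
    'directories': [('/play/files/app/empty', 'app/empty')],
    # (link_target, relative_path) per symlink
    'symlinks': [('../conf.yml', 'app/link.yml')],
}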
Example No. 42
    def run(self, tmp=None, task_vars=None):
        self._supports_check_mode = True
        self._supports_async = True

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        category_names = self._task.args.get('category_names', [
            'CriticalUpdates',
            'SecurityUpdates',
            'UpdateRollups',
        ])
        if isinstance(category_names, AnsibleUnicode):
            category_names = [cat.strip() for cat in category_names.split(",")]

        state = self._task.args.get('state', 'installed')
        reboot = self._task.args.get('reboot', False)
        reboot_timeout = self._task.args.get('reboot_timeout',
                                             self.DEFAULT_REBOOT_TIMEOUT)

        # Validate the options
        try:
            self._validate_categories(category_names)
        except AnsibleError as exc:
            result['failed'] = True
            result['msg'] = to_text(exc)
            return result

        if state not in ['installed', 'searched']:
            result['failed'] = True
            result['msg'] = "state must be either installed or searched"
            return result

        try:
            reboot = boolean(reboot)
        except TypeError as exc:
            result['failed'] = True
            result['msg'] = "cannot parse reboot as a boolean: %s" % to_text(exc)
            return result

        if not isinstance(reboot_timeout, int):
            result['failed'] = True
            result['msg'] = "reboot_timeout must be an integer"
            return result

        if reboot and self._task.async_val > 0:
            result['failed'] = True
            result['msg'] = "async is not supported for this task when " \
                            "reboot=yes"
            return result

        # Run the module
        new_module_args = self._task.args.copy()
        new_module_args.pop('reboot', None)
        new_module_args.pop('reboot_timeout', None)
        result = self._run_win_updates(new_module_args, task_vars)

        # if the module failed to run at all then changed won't be populated
        # so we just return the result as is
        # https://github.com/ansible/ansible/issues/38232
        failed = result.get('failed', False)
        if "updates" not in result.keys() or failed:
            result['failed'] = True
            return result

        changed = result.get('changed', False)
        updates = result.get('updates', dict())
        filtered_updates = result.get('filtered_updates', dict())
        found_update_count = result.get('found_update_count', 0)
        installed_update_count = result.get('installed_update_count', 0)

        # Handle automatic reboots if the reboot flag is set
        if reboot and state == 'installed' and not \
                self._play_context.check_mode:
            previously_errored = False
            while result['installed_update_count'] > 0 or \
                    result['found_update_count'] > 0 or \
                    result['reboot_required'] is True:
                display.vvv("win_updates: check win_updates results for "
                            "automatic reboot: %s" % json.dumps(result))

                # check if the module failed, break from the loop if it
                # previously failed and return error to the user
                if result.get('failed', False):
                    if previously_errored:
                        break
                    previously_errored = True
                else:
                    previously_errored = False

                reboot_error = None
                # check if a reboot was required before installing the updates
                if result.get('msg', '') == "A reboot is required before " \
                                            "more updates can be installed":
                    reboot_error = "reboot was required before more updates " \
                                   "can be installed"

                if result.get('reboot_required', False):
                    if reboot_error is None:
                        reboot_error = "reboot was required to finalise " \
                                       "update install"
                    try:
                        changed = True
                        self._reboot_server(task_vars, reboot_timeout)
                    except AnsibleError as exc:
                        result['failed'] = True
                        result['msg'] = "Failed to reboot remote host when " \
                                        "%s: %s" \
                                        % (reboot_error, to_text(exc))
                        break

                result.pop('msg', None)
                # rerun the win_updates module after the reboot is complete
                result = self._run_win_updates(new_module_args, task_vars)
                if result.get('failed', False):
                    return result

                result_updates = result.get('updates', dict())
                result_filtered_updates = result.get('filtered_updates', dict())
                updates = self._merge_dict(updates, result_updates)
                filtered_updates = self._merge_dict(filtered_updates,
                                                    result_filtered_updates)
                found_update_count += result.get('found_update_count', 0)
                installed_update_count += result.get('installed_update_count', 0)
                if result['changed']:
                    changed = True

        # finally create the return dict based on the aggregated execution
        # values if we are not in async
        if self._task.async_val == 0:
            result['changed'] = changed
            result['updates'] = updates
            result['filtered_updates'] = filtered_updates
            result['found_update_count'] = found_update_count
            result['installed_update_count'] = installed_update_count

        return result
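
The _merge_dict helper is defined outside this excerpt; a plausible minimal reading, assuming it only needs to fold the 'updates' dicts from consecutive runs together (keys are update IDs, so a shallow merge suffices):

def _merge_dict(a, b):
    # sketch of the assumed helper: shallow merge, the later run wins on collision
    merged = dict(a)
    merged.update(b)
    return merged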
Example No. 43
    def _get_next_task_from_state(self, state, host):

        task = None

        # try and find the next task, given the current state.
        while True:
            # try to get the current block from the list of blocks, and
            # if we run past the end of the list we know we're done with
            # this block
            try:
                block = state._blocks[state.cur_block]
            except IndexError:
                state.run_state = IteratingStates.COMPLETE
                return (state, None)

            if state.run_state == IteratingStates.SETUP:
                # First, we check to see if we were pending setup. If not, this is
                # the first trip through IteratingStates.SETUP, so we set the pending_setup
                # flag and try to determine if we do in fact want to gather facts for
                # the specified host.
                if not state.pending_setup:
                    state.pending_setup = True

                    # Gather facts if the default is 'smart' and we have not yet
                    # done it for this host; or if 'explicit' and the play sets
                    # gather_facts to True; or if 'implicit' and the play does
                    # NOT explicitly set gather_facts to False.

                    gathering = C.DEFAULT_GATHERING
                    implied = self._play.gather_facts is None or boolean(
                        self._play.gather_facts, strict=False)

                    if (gathering == 'implicit' and implied) or \
                       (gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
                       (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
                        # The setup block is always self._blocks[0], as we inject it
                        # during the play compilation in __init__ above.
                        setup_block = self._blocks[0]
                        if setup_block.has_tasks() and len(setup_block.block) > 0:
                            task = setup_block.block[0]
                else:
                    # This is the second trip through IteratingStates.SETUP, so we clear
                    # the flag and move onto the next block in the list while setting
                    # the run state to IteratingStates.TASKS
                    state.pending_setup = False

                    state.run_state = IteratingStates.TASKS
                    if not state.did_start_at_task:
                        state.cur_block += 1
                        state.cur_regular_task = 0
                        state.cur_rescue_task = 0
                        state.cur_always_task = 0
                        state.tasks_child_state = None
                        state.rescue_child_state = None
                        state.always_child_state = None

            elif state.run_state == IteratingStates.TASKS:
                # clear the pending setup flag, since we're past that and it didn't fail
                if state.pending_setup:
                    state.pending_setup = False

                # First, we check for a child task state that is not failed, and if we
                # have one recurse into it for the next task. If we're done with the child
                # state, we clear it and drop back to getting the next task from the list.
                if state.tasks_child_state:
                    (state.tasks_child_state,
                     task) = self._get_next_task_from_state(
                         state.tasks_child_state, host=host)
                    if self._check_failed_state(state.tasks_child_state):
                        # failed child state, so clear it and move into the rescue portion
                        state.tasks_child_state = None
                        self._set_failed_state(state)
                    else:
                        # get the next task recursively
                        if task is None or state.tasks_child_state.run_state == IteratingStates.COMPLETE:
                            # we're done with the child state, so clear it and continue
                            # back to the top of the loop to get the next task
                            state.tasks_child_state = None
                            continue
                else:
                    # First here, we check to see if we've failed anywhere down the chain
                    # of states we have, and if so we move onto the rescue portion. Otherwise,
                    # we check to see if we've moved past the end of the list of tasks. If so,
                    # we move into the always portion of the block, otherwise we get the next
                    # task from the list.
                    if self._check_failed_state(state):
                        state.run_state = IteratingStates.RESCUE
                    elif state.cur_regular_task >= len(block.block):
                        state.run_state = IteratingStates.ALWAYS
                    else:
                        task = block.block[state.cur_regular_task]
                        # if the current task is actually a child block, create a child
                        # state for us to recurse into on the next pass
                        if isinstance(task, Block):
                            state.tasks_child_state = HostState(blocks=[task])
                            state.tasks_child_state.run_state = IteratingStates.TASKS
                            # since we've created the child state, clear the task
                            # so we can pick up the child state on the next pass
                            task = None
                        state.cur_regular_task += 1

            elif state.run_state == IteratingStates.RESCUE:
                # The process here is identical to IteratingStates.TASKS, except instead
                # we move into the always portion of the block.
                if host.name in self._play._removed_hosts:
                    self._play._removed_hosts.remove(host.name)

                if state.rescue_child_state:
                    (state.rescue_child_state,
                     task) = self._get_next_task_from_state(
                         state.rescue_child_state, host=host)
                    if self._check_failed_state(state.rescue_child_state):
                        state.rescue_child_state = None
                        self._set_failed_state(state)
                    else:
                        if task is None or state.rescue_child_state.run_state == IteratingStates.COMPLETE:
                            state.rescue_child_state = None
                            continue
                else:
                    if state.fail_state & FailedStates.RESCUE == FailedStates.RESCUE:
                        state.run_state = IteratingStates.ALWAYS
                    elif state.cur_rescue_task >= len(block.rescue):
                        if len(block.rescue) > 0:
                            state.fail_state = FailedStates.NONE
                        state.run_state = IteratingStates.ALWAYS
                        state.did_rescue = True
                    else:
                        task = block.rescue[state.cur_rescue_task]
                        if isinstance(task, Block):
                            state.rescue_child_state = HostState(blocks=[task])
                            state.rescue_child_state.run_state = IteratingStates.TASKS
                            task = None
                        state.cur_rescue_task += 1

            elif state.run_state == IteratingStates.ALWAYS:
                # And again, the process here is identical to IteratingStates.TASKS, except
                # instead we either move onto the next block in the list, or we set the
                # run state to IteratingStates.COMPLETE in the event of any errors, or when we
                # have hit the end of the list of blocks.
                if state.always_child_state:
                    (state.always_child_state,
                     task) = self._get_next_task_from_state(
                         state.always_child_state, host=host)
                    if self._check_failed_state(state.always_child_state):
                        state.always_child_state = None
                        self._set_failed_state(state)
                    else:
                        if task is None or state.always_child_state.run_state == IteratingStates.COMPLETE:
                            state.always_child_state = None
                            continue
                else:
                    if state.cur_always_task >= len(block.always):
                        if state.fail_state != FailedStates.NONE:
                            state.run_state = IteratingStates.COMPLETE
                        else:
                            state.cur_block += 1
                            state.cur_regular_task = 0
                            state.cur_rescue_task = 0
                            state.cur_always_task = 0
                            state.run_state = IteratingStates.TASKS
                            state.tasks_child_state = None
                            state.rescue_child_state = None
                            state.always_child_state = None
                            state.did_rescue = False
                    else:
                        task = block.always[state.cur_always_task]
                        if isinstance(task, Block):
                            state.always_child_state = HostState(blocks=[task])
                            state.always_child_state.run_state = IteratingStates.TASKS
                            task = None
                        state.cur_always_task += 1

            elif state.run_state == IteratingStates.COMPLETE:
                return (state, None)

            # if something above set the task, break out of the loop now
            if task:
                break

        return (state, task)
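
The fact-gathering decision near the top of the state machine above packs three gathering policies into one condition. Restated as a standalone predicate for clarity (a sketch, reusing the real boolean() helper; should_gather and already_gathered are invented names):

from ansible.module_utils.parsing.convert_bool import boolean

def should_gather(gathering, gather_facts, already_gathered):
    # gathering is C.DEFAULT_GATHERING: 'implicit', 'explicit' or 'smart'
    implied = gather_facts is None or boolean(gather_facts, strict=False)
    if gathering == 'implicit':
        return implied  # gather unless the play sets gather_facts: no
    if gathering == 'explicit':
        return boolean(gather_facts, strict=False)  # only when the play opts in
    return implied and not already_gathered  # 'smart': skip hosts with cached facts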
Example No. 44
    def run(self, tmp=None, task_vars=None):
        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        jid = self._task.args.get("jid", "")
        alias = self._task.args.get("alias", "")
        job = self._task.args.get("job", "")
        retries = int(self._task.args.get("retries", 1))
        delay = int(self._task.args.get("delay", 0))
        cleanup = boolean(self._task.args.get("cleanup", True))

        if not jid and not alias and not job:
            raise AnsibleActionFail("one of [jid, alias, job] is required")

        if not retries:
            raise AnsibleActionFail("retries is required")

        if not delay:
            raise AnsibleActionFail("delay is required")

        if job:
            if job not in task_vars['vars']:
                raise AnsibleActionFail("no job among facts")
            job = task_vars['vars'][job]
            if "ansible_job_id" not in job:
                raise AnsibleActionFail("job does not contain results file")
            jid = job["ansible_job_id"]

        if not jid and not alias:
            raise AnsibleActionFail("jid or alias is required")

        # wait
        wait_task = self._task.copy()
        wait_task.args = dict(mode="status")
        if alias:
            wait_task.args["alias"] = alias
        if jid:
            wait_task.args["jid"] = jid

        # wait_task.retries copied from self._task
        # wait_task.delay copied from self._task

        def wait_until(x):
            if not is_started(x):
                raise AnsibleActionFail("job did not start")
            return is_finished(x) or is_failed(x) or is_killed(x)

        wait_action = self._shared_loader_obj.action_loader.get(
            'async_status_id',
            task=wait_task,
            connection=self._connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=self._templar,
            shared_loader_obj=self._shared_loader_obj)
        result.update(
            self.retry(action=wait_action,
                       vars=task_vars,
                       retries=retries,
                       delay=delay,
                       until=wait_until))

        # cleanup
        if not is_failed(result) and cleanup:
            cleanup_task = self._task.copy()
            cleanup_task.retries = 0
            cleanup_task.delay = 0
            cleanup_task.args = dict(mode="cleanup")
            if alias:
                cleanup_task.args["alias"] = alias
            if jid:
                cleanup_task.args["jid"] = jid
            self._shared_loader_obj.action_loader.get('async_status_id',
                                                      task=cleanup_task,
                                                      connection=self._connection,
                                                      play_context=self._play_context,
                                                      loader=self._loader,
                                                      templar=self._templar,
                                                      shared_loader_obj=self._shared_loader_obj) \
                .run(task_vars=task_vars)
            result['cleanup'] = True

        return result
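
Neither retry() nor the is_started/is_finished/is_failed/is_killed predicates appear in this excerpt; a minimal sketch of the retry loop the call site implies (names and behaviour assumed):

import time

def retry(action, vars, retries, delay, until):
    # rerun the wrapped action until the predicate passes or attempts run out;
    # the last result seen is returned either way
    result = {}
    for _ in range(max(retries, 1)):
        result = action.run(task_vars=vars)
        if until(result):
            break
        time.sleep(delay)
    return result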
Example No. 45
    def run(self, tmp=None, task_vars=None):

        self._supports_check_mode = True

        result = super(ActionModule, self).run(tmp, task_vars)
        result['ansible_facts'] = {}

        modules = C.config.get_config_value('FACTS_MODULES',
                                            variables=task_vars)
        parallel = task_vars.pop('ansible_facts_parallel',
                                 self._task.args.pop('parallel', None))
        if 'smart' in modules:
            connection_map = C.config.get_config_value(
                'CONNECTION_FACTS_MODULES', variables=task_vars)
            network_os = self._task.args.get(
                'network_os',
                task_vars.get(
                    'ansible_network_os',
                    task_vars.get('ansible_facts', {}).get('network_os')))
            modules.extend([
                connection_map.get(network_os or self._connection._load_name,
                                   'setup')
            ])
            modules.pop(modules.index('smart'))

        failed = {}
        skipped = {}

        if parallel is None:
            # default: only go parallel when more than one module is involved
            parallel = len(modules) > 1
        else:
            parallel = boolean(parallel)

        if not parallel:
            # serially execute each module
            for fact_module in modules:
                # parallel is off (or there is just one module), so no fancy async
                mod_args = self._get_module_args(fact_module, task_vars)
                res = self._execute_module(module_name=fact_module,
                                           module_args=mod_args,
                                           task_vars=task_vars,
                                           wrap_async=False)
                if res.get('failed', False):
                    failed[fact_module] = res
                elif res.get('skipped', False):
                    skipped[fact_module] = res
                else:
                    result = self._combine_task_result(result, res)

            self._remove_tmp_path(self._connection._shell.tmpdir)
        else:
            # do it async
            jobs = {}
            for fact_module in modules:
                mod_args = self._get_module_args(fact_module, task_vars)
                self._display.vvvv("Running %s" % fact_module)
                jobs[fact_module] = (self._execute_module(
                    module_name=fact_module,
                    module_args=mod_args,
                    task_vars=task_vars,
                    wrap_async=True))

            while jobs:
                for module in jobs:
                    poll_args = {
                        'jid': jobs[module]['ansible_job_id'],
                        '_async_dir':
                        os.path.dirname(jobs[module]['results_file'])
                    }
                    res = self._execute_module(module_name='async_status',
                                               module_args=poll_args,
                                               task_vars=task_vars,
                                               wrap_async=False)
                    if res.get('finished', 0) == 1:
                        if res.get('failed', False):
                            failed[module] = res
                        elif res.get('skipped', False):
                            skipped[module] = res
                        else:
                            result = self._combine_task_result(result, res)
                        del jobs[module]
                        break
                    else:
                        time.sleep(0.1)
                else:
                    time.sleep(0.5)

        if skipped:
            result['msg'] = "The following modules were skipped: %s\n" % (
                ', '.join(skipped.keys()))
            result['skipped_modules'] = skipped
            if len(skipped) == len(modules):
                result['skipped'] = True

        if failed:
            result['failed'] = True
            result['msg'] = "The following modules failed to execute: %s\n" % (
                ', '.join(failed.keys()))
            result['failed_modules'] = failed

        # tell executor facts were gathered
        result['ansible_facts']['_ansible_facts_gathered'] = True

        # hack to keep --verbose from showing all the setup module result
        result['_ansible_verbose_override'] = True

        return result
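
_combine_task_result is defined outside this excerpt; a plausible minimal reading, assuming it only folds one module's facts and warnings into the aggregate result:

def _combine_task_result(result, res):
    # sketch of the assumed helper
    result['ansible_facts'].update(res.get('ansible_facts', {}))
    if res.get('warnings'):
        result.setdefault('warnings', []).extend(res['warnings'])
    return result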
Example No. 46
    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables.  Run basic types (from isa) as well as
        any _post_validate_<foo> functions.
        '''

        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')

        for (name, attribute) in iteritems(self._valid_attrs):

            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError("the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue

            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                elif attribute.isa == 'class':
                    value = getattr(self, name)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))

                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    setattr(self, name, attribute.default)
                    continue

                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = to_text(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value, strict=False)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa in ('list', 'barelist'):
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            if isinstance(value, string_types) and attribute.isa == 'barelist':
                                display.deprecated(
                                    "Using comma separated values for a list has been deprecated. "
                                    "You should instead use the correct YAML syntax for lists. "
                                )
                                value = value.split(',')
                            else:
                                value = [value]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError("the field '%s' should be a list of %s, "
                                                             "but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        elif not isinstance(value, (list, set)):
                            if isinstance(value, string_types):
                                value = value.split(',')
                            else:
                                # Making a list like this handles strings of
                                # text and bytes properly
                                value = [value]
                        if not isinstance(value, set):
                            value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)
                    elif attribute.isa == 'class':
                        if not isinstance(value, attribute.class_type):
                            raise TypeError("%s is not a valid %s (got a %s instead)" % (name, attribute.class_type, type(value)))
                        value.post_validate(templar=templar)

                # and assign the massaged value back to the attribute field
                setattr(self, name, value)
            except (TypeError, ValueError) as e:
                raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
                                         "The error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds(), orig_exc=e)
            except (AnsibleUndefinedVariable, UndefinedError) as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    if name == 'args':
                        msg = "The task includes an option with an undefined variable. The error was: %s" % (to_native(e))
                    else:
                        msg = "The field '%s' has an invalid value, which includes an undefined variable. The error was: %s" % (name, to_native(e))
                    raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)

        self._finalized = True
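
A quick worked illustration of the 'percent' coercion above (standalone sketch, using plain str rather than string_types): both bare numbers and percent-suffixed strings normalise to a float.

for raw in ('30%', '12.5', 7):
    value = raw
    if isinstance(value, str) and '%' in value:
        value = value.replace('%', '')
    print(float(value))  # -> 30.0, 12.5, 7.0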
Example No. 47
    def _file_transport_command(self, in_path, out_path, sftp_action):
        # scp and sftp require square brackets for IPv6 addresses, but
        # accept them for hostnames and IPv4 addresses too.
        host = '[%s]' % self.host

        # Transfer methods to try
        methods = []

        # Use the transfer_method option if set, otherwise use scp_if_ssh
        ssh_transfer_method = self._play_context.ssh_transfer_method
        if ssh_transfer_method is not None:
            if not (ssh_transfer_method in ('smart', 'sftp', 'scp', 'piped')):
                raise AnsibleOptionsError('transfer_method needs to be one of [smart|sftp|scp|piped]')
            if ssh_transfer_method == 'smart':
                methods = ['sftp', 'scp', 'piped']
            else:
                methods = [ssh_transfer_method]
        else:
            # since this can be a non-bool now, we need to handle it correctly
            scp_if_ssh = C.DEFAULT_SCP_IF_SSH
            if not isinstance(scp_if_ssh, bool):
                scp_if_ssh = scp_if_ssh.lower()
                if scp_if_ssh in BOOLEANS:
                    scp_if_ssh = boolean(scp_if_ssh, strict=False)
                elif scp_if_ssh != 'smart':
                    raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
            if scp_if_ssh == 'smart':
                methods = ['sftp', 'scp', 'piped']
            elif scp_if_ssh is True:
                methods = ['scp']
            else:
                methods = ['sftp']

        for method in methods:
            returncode = stdout = stderr = None
            if method == 'sftp':
                cmd = self._build_command('sftp', to_bytes(host))
                in_data = u"{0} {1} {2}\n".format(sftp_action, shlex_quote(in_path), shlex_quote(out_path))
                in_data = to_bytes(in_data, nonstring='passthru')
                (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
            elif method == 'scp':
                if sftp_action == 'get':
                    cmd = self._build_command('scp', u'{0}:{1}'.format(host, shlex_quote(in_path)), out_path)
                else:
                    cmd = self._build_command('scp', in_path, u'{0}:{1}'.format(host, shlex_quote(out_path)))
                in_data = None
                (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
            elif method == 'piped':
                if sftp_action == 'get':
                    # we pass sudoable=False to disable pty allocation, which
                    # would end up mixing stdout/stderr and screwing with newlines
                    (returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
                    with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
                        out_file.write(stdout)
                else:
                    with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
                        in_data = in_file.read()
                    in_data = to_bytes(in_data, nonstring='passthru')
                    (returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), in_data=in_data)

            # Check the return code and rollover to next method if failed
            if returncode == 0:
                return (returncode, stdout, stderr)
            else:
                # If not in smart mode, the data will be printed by the raise below
                if len(methods) > 1:
                    display.warning(msg='%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
                    display.debug(msg='%s' % to_native(stdout))
                    display.debug(msg='%s' % to_native(stderr))

        if returncode == 255:
            raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
        else:
            raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
                               (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
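
The fallback ordering above can be restated as a pure function (sketch only; it mirrors the precedence in the excerpt, where ssh_transfer_method wins over the legacy scp_if_ssh setting):

def transfer_methods(ssh_transfer_method, scp_if_ssh):
    if ssh_transfer_method is not None:
        if ssh_transfer_method == 'smart':
            return ['sftp', 'scp', 'piped']
        return [ssh_transfer_method]
    if scp_if_ssh == 'smart':
        return ['sftp', 'scp', 'piped']
    return ['scp'] if scp_if_ssh is True else ['sftp']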
Example No. 48
    def run(self, tmp=None, task_vars=None):
        ''' handler for template operations '''

        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        force = boolean(self._task.args.get('force', True), strict=False)
        follow = boolean(self._task.args.get('follow', False), strict=False)
        state = self._task.args.get('state', None)
        newline_sequence = self._task.args.get('newline_sequence', self.DEFAULT_NEWLINE_SEQUENCE)
        variable_start_string = self._task.args.get('variable_start_string', None)
        variable_end_string = self._task.args.get('variable_end_string', None)
        block_start_string = self._task.args.get('block_start_string', None)
        block_end_string = self._task.args.get('block_end_string', None)
        trim_blocks = self._task.args.get('trim_blocks', None)

        wrong_sequences = ["\\n", "\\r", "\\r\\n"]
        allowed_sequences = ["\n", "\r", "\r\n"]

        # Convert escaped sequences as written in YAML (e.g. "\\n") into the
        # real control characters Jinja2 expects
        if newline_sequence in wrong_sequences:
            newline_sequence = allowed_sequences[wrong_sequences.index(newline_sequence)]

        if state is not None:
            result['failed'] = True
            result['msg'] = "'state' cannot be specified on a template"
        elif source is None or dest is None:
            result['failed'] = True
            result['msg'] = "src and dest are required"
        elif newline_sequence not in allowed_sequences:
            result['failed'] = True
            result['msg'] = "newline_sequence needs to be one of: \n, \r or \r\n"
        else:
            try:
                source = self._find_needle('templates', source)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)

        if 'failed' in result:
            return result

        # Get vault decrypted tmp file
        try:
            tmp_source = self._loader.get_real_file(source)
        except AnsibleFileNotFound as e:
            result['failed'] = True
            result['msg'] = "could not find src=%s, %s" % (source, e)
            self._remove_tmp_path(tmp)
            return result

        # template the source data locally & get ready to transfer
        try:
            with open(tmp_source, 'r') as f:
                template_data = to_text(f.read())

            # set jinja2 internal search path for includes
            searchpath = task_vars.get('ansible_search_path', [])
            searchpath.extend([self._loader._basedir, os.path.dirname(source)])

            # We want to search into the 'templates' subdir of each search path in
            # addition to our original search paths.
            newsearchpath = []
            for p in searchpath:
                newsearchpath.append(os.path.join(p, 'templates'))
                newsearchpath.append(p)
            searchpath = newsearchpath

            self._templar.environment.loader.searchpath = searchpath
            self._templar.environment.newline_sequence = newline_sequence
            if block_start_string is not None:
                self._templar.environment.block_start_string = block_start_string
            if block_end_string is not None:
                self._templar.environment.block_end_string = block_end_string
            if variable_start_string is not None:
                self._templar.environment.variable_start_string = variable_start_string
            if variable_end_string is not None:
                self._templar.environment.variable_end_string = variable_end_string
            if trim_blocks is not None:
                self._templar.environment.trim_blocks = bool(trim_blocks)

            # add ansible 'template' vars
            temp_vars = task_vars.copy()
            temp_vars.update(generate_ansible_template_vars(source))

            old_vars = self._templar._available_variables
            self._templar.set_available_variables(temp_vars)
            resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
            self._templar.set_available_variables(old_vars)
        except Exception as e:
            result['failed'] = True
            result['msg'] = "%s: %s" % (type(e).__name__, to_text(e))
            return result
        finally:
            self._loader.cleanup_tmp_file(tmp_source)

        new_task = self._task.copy()
        new_task.args.pop('newline_sequence', None)
        new_task.args.pop('block_start_string', None)
        new_task.args.pop('block_end_string', None)
        new_task.args.pop('variable_start_string', None)
        new_task.args.pop('variable_end_string', None)
        new_task.args.pop('trim_blocks', None)
        try:
            tempdir = tempfile.mkdtemp()
            result_file = os.path.join(tempdir, os.path.basename(source))
            with open(result_file, 'wb') as f:
                f.write(to_bytes(resultant, errors='surrogate_or_strict'))

            new_task.args.update(
                dict(
                    src=result_file,
                    dest=dest,
                    follow=follow,
                ),
            )
            copy_action = self._shared_loader_obj.action_loader.get('copy',
                                                                    task=new_task,
                                                                    connection=self._connection,
                                                                    play_context=self._play_context,
                                                                    loader=self._loader,
                                                                    templar=self._templar,
                                                                    shared_loader_obj=self._shared_loader_obj)
            result.update(copy_action.run(task_vars=task_vars))
        finally:
            shutil.rmtree(tempdir)

        return result
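
The wrong_sequences/allowed_sequences pair near the top of this example is just a lookup table; the same normalisation written directly (standalone sketch):

unescape = {"\\n": "\n", "\\r": "\r", "\\r\\n": "\r\n"}
for raw in ("\\n", "\n", "\\r\\n"):
    print(repr(unescape.get(raw, raw)))  # -> '\n', '\n', '\r\n'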
Example No. 49
    def run(self, terms, variables, **kwargs):

        def _raise_terms_error(msg=""):
            raise AnsibleError(
                "subelements lookup expects a list of two or three items, " + msg)

        terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)

        # check lookup terms - check number of terms
        if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
            _raise_terms_error()

        # first term should be a list (or dict), second a string holding the subkey
        if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
            _raise_terms_error("first a dict or a list, second a string pointing to the subkey")
        subelements = terms[1].split(".")

        if isinstance(terms[0], dict):  # convert to list:
            if terms[0].get('skipped', False) is not False:
                # the registered result was completely skipped
                return []
            elementlist = []
            for key in terms[0]:
                elementlist.append(terms[0][key])
        else:
            elementlist = terms[0]

        # check for optional flags in third term
        flags = {}
        if len(terms) == 3:
            flags = terms[2]
        if not isinstance(flags, dict) or not all(isinstance(key, string_types) and key in FLAGS for key in flags):
            _raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)

        # build_items
        ret = []
        for item0 in elementlist:
            if not isinstance(item0, dict):
                raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
            if item0.get('skipped', False) is not False:
                # this particular item is to be skipped
                continue

            skip_missing = boolean(flags.get('skip_missing', False), strict=False)
            subvalue = item0
            lastsubkey = False
            sublist = []
            for subkey in subelements:
                if subkey == subelements[-1]:
                    lastsubkey = True
                if subkey not in subvalue:
                    if skip_missing:
                        continue
                    else:
                        raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
                if not lastsubkey:
                    if not isinstance(subvalue[subkey], dict):
                        if skip_missing:
                            continue
                        else:
                            raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
                    else:
                        subvalue = subvalue[subkey]
                else:  # lastsubkey
                    if not isinstance(subvalue[subkey], list):
                        raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
                    else:
                        sublist = subvalue.pop(subkey, [])
            for item1 in sublist:
                ret.append((item0, item1))

        return ret
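
A worked example of the pairing the lookup above produces (data invented for illustration); terms of [users, 'mysql.privs'] walk the dotted subkey and yield one (item, subelement) tuple per list entry:

users = [
    {'name': 'alice', 'mysql': {'privs': ['*.*:SELECT', 'db1.*:ALL']}},
    {'name': 'bob', 'mysql': {'privs': ['db2.*:ALL']}},
]
# run(terms=[users, 'mysql.privs'], variables={}) would return:
#   ({'name': 'alice', ...}, '*.*:SELECT')
#   ({'name': 'alice', ...}, 'db1.*:ALL')
#   ({'name': 'bob', ...}, 'db2.*:ALL')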
Example No. 50
from typing import Any

from ansible.module_utils.parsing.convert_bool import boolean


def convert_to_boolean(value: Any) -> bool:
    """Use Ansible to convert something to a boolean."""
    return bool(boolean(value))
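
Since the helper above calls boolean() with its default strict=True, unrecognised spellings raise rather than silently coercing. A short usage illustration (assuming the standard helper from ansible.module_utils.parsing.convert_bool, as imported above):

print(convert_to_boolean('yes'))  # True
print(convert_to_boolean('off'))  # False
print(convert_to_boolean(1))      # True
# convert_to_boolean('nope') raises TypeError: strict parsing rejects it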
Example No. 51
    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        source = self._task.args.get('src', None)
        content = self._task.args.get('content', None)
        dest = self._task.args.get('dest', None)
        remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
        follow = boolean(self._task.args.get('follow', False), strict=False)
        force = boolean(self._task.args.get('force', True), strict=False)

        result['src'] = source
        result['dest'] = dest

        result['failed'] = True
        if (source is None and content is None) or dest is None:
            result['msg'] = "src (or content) and dest are required"
        elif source is not None and content is not None:
            result['msg'] = "src and content are mutually exclusive"
        elif content is not None and dest is not None and (
                dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
            result['msg'] = "dest must be a file if content is defined"
        else:
            del result['failed']

        if result.get('failed'):
            return result

        # If content is defined make a temp file and write the content into it
        content_tempfile = None
        if content is not None:
            try:
                # if content comes to us as a dict it should be decoded json.
                # We need to encode it back into a string and write it out
                if isinstance(content, dict) or isinstance(content, list):
                    content_tempfile = self._create_content_tempfile(json.dumps(content))
                else:
                    content_tempfile = self._create_content_tempfile(content)
                source = content_tempfile
            except Exception as err:
                result['failed'] = True
                result['msg'] = "could not write content temp file: %s" % to_native(err)
                return result
        # all actions should occur on the remote server, run win_copy module
        elif remote_src:
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    mode="remote",
                    dest=dest,
                    src=source,
                    force=force
                )
            )
            new_module_args.pop('content', None)
            result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
            return result
        # find_needle returns a path that may not have a trailing slash on a
        # directory, so detect that first and re-append the slash afterwards
        else:
            trailing_slash = source.endswith(os.path.sep)
            try:
                # find in expected paths
                source = self._find_needle('files', source)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                result['exception'] = traceback.format_exc()
                return result

            if trailing_slash != source.endswith(os.path.sep):
                if source[-1] == os.path.sep:
                    source = source[:-1]
                else:
                    source = source + os.path.sep

        # Source files/directories/symlinks (full path plus relative dest
        # path) that we will try to copy to the destination
        source_files = {'files': [], 'directories': [], 'symlinks': []}

        # If source is a directory, populate our dict of things to copy;
        # otherwise source is a single file whose details we add below.
        if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
            result['operation'] = 'folder_copy'

            # Get a list of the files we want to replicate on the remote side
            source_files = _walk_dirs(source, local_follow=follow,
                                      trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
                                      checksum_check=force)

            # If it's a recursive copy, the destination is always a dir;
            # explicitly mark it so (note: the win_copy module relies on this).
            if not self._connection._shell.path_has_trailing_slash(dest):
                dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)

            check_dest = dest
        # Source is a file, add details to source_files dict
        else:
            result['operation'] = 'file_copy'

            original_basename = os.path.basename(source)
            result['original_basename'] = original_basename

            # check if dest ends with / or \ and append source filename to dest
            if self._connection._shell.path_has_trailing_slash(dest):
                check_dest = dest
                filename = original_basename
                result['dest'] = self._connection._shell.join_path(dest, filename)
            else:
                # replace \\ with / so we can use os.path to get the filename or dirname
                unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
                filename = os.path.basename(unix_path)
                check_dest = os.path.dirname(unix_path)

            file_checksum = _get_local_checksum(force, source)
            source_files['files'].append(
                dict(
                    src=source,
                    dest=filename,
                    checksum=file_checksum
                )
            )
            result['checksum'] = file_checksum
            result['size'] = os.path.getsize(to_bytes(source, errors='surrogate_or_strict'))

        # find out the files/directories/symlinks that we need to copy to the server
        query_args = self._task.args.copy()
        query_args.update(
            dict(
                mode="query",
                dest=check_dest,
                force=force,
                files=source_files['files'],
                directories=source_files['directories'],
                symlinks=source_files['symlinks']
            )
        )

        query_args.pop('content', None)
        query_return = self._execute_module(module_args=query_args, task_vars=task_vars)

        if query_return.get('failed') is True:
            result.update(query_return)
            return result

        if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
            # we only need to copy 1 file, don't mess around with zips
            file_src = query_return['files'][0]['src']
            file_dest = query_return['files'][0]['dest']
            copy_result = self._copy_single_file(file_src, dest, file_dest, task_vars)

            result['changed'] = True
            if copy_result.get('failed') is True:
                result['failed'] = True
                result['msg'] = "failed to copy file %s: %s" % (file_src, copy_result['msg'])
        elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
            # either multiple files or directories need to be copied, compress
            # to a zip and 'explode' the zip on the server
            # TODO: handle symlinks
            result.update(self._copy_zip_file(dest, source_files['files'], source_files['directories'], task_vars))
            result['changed'] = True
        else:
            # no operations need to occur
            result['failed'] = False
            result['changed'] = False

        # remove the content temp file if it was created
        self._remove_tempfile_if_content_defined(content, content_tempfile)
        return result
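
_get_local_checksum is called above but not shown in this excerpt. A plausible minimal sketch, inferred from the call sites (the exact name and location are assumptions); win_copy treats a None checksum as "skip the comparison":

from ansible.utils.hashing import checksum


def _get_local_checksum(get_checksum, local_path):
    # Only pay for hashing when force-checking is requested; a None
    # checksum tells the module to skip the comparison entirely.
    if get_checksum:
        return checksum(local_path)
    return None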
Exemplo n.º 52
0
    def _build_kwargs(self):
        self._psrp_host = self.get_option('remote_addr')
        self._psrp_user = self.get_option('remote_user')
        self._psrp_pass = self.get_option('remote_password')

        protocol = self.get_option('protocol')
        port = self.get_option('port')
        if protocol is None and port is None:
            protocol = 'https'
            port = 5986
        elif protocol is None:
            protocol = 'https' if int(port) != 5985 else 'http'
        elif port is None:
            port = 5986 if protocol == 'https' else 5985

        self._psrp_protocol = protocol
        self._psrp_port = int(port)

        self._psrp_path = self.get_option('path')
        self._psrp_auth = self.get_option('auth')
        # cert validation can either be a bool or a path to the cert
        cert_validation = self.get_option('cert_validation')
        cert_trust_path = self.get_option('ca_cert')
        if cert_validation == 'ignore':
            self._psrp_cert_validation = False
        elif cert_trust_path is not None:
            self._psrp_cert_validation = cert_trust_path
        else:
            self._psrp_cert_validation = True

        self._psrp_connection_timeout = self.get_option('connection_timeout')  # Can be None
        self._psrp_read_timeout = self.get_option('read_timeout')  # Can be None
        self._psrp_message_encryption = self.get_option('message_encryption')
        self._psrp_proxy = self.get_option('proxy')
        self._psrp_ignore_proxy = boolean(self.get_option('ignore_proxy'))
        self._psrp_operation_timeout = int(self.get_option('operation_timeout'))
        self._psrp_max_envelope_size = int(self.get_option('max_envelope_size'))
        self._psrp_configuration_name = self.get_option('configuration_name')
        self._psrp_reconnection_retries = int(self.get_option('reconnection_retries'))
        self._psrp_reconnection_backoff = float(self.get_option('reconnection_backoff'))

        self._psrp_certificate_key_pem = self.get_option('certificate_key_pem')
        self._psrp_certificate_pem = self.get_option('certificate_pem')
        self._psrp_credssp_auth_mechanism = self.get_option('credssp_auth_mechanism')
        self._psrp_credssp_disable_tlsv1_2 = self.get_option('credssp_disable_tlsv1_2')
        self._psrp_credssp_minimum_version = self.get_option('credssp_minimum_version')
        self._psrp_negotiate_send_cbt = self.get_option('negotiate_send_cbt')
        self._psrp_negotiate_delegate = self.get_option('negotiate_delegate')
        self._psrp_negotiate_hostname_override = self.get_option('negotiate_hostname_override')
        self._psrp_negotiate_service = self.get_option('negotiate_service')

        supported_args = []
        for auth_kwarg in AUTH_KWARGS.values():
            supported_args.extend(auth_kwarg)
        extra_args = set([
            v.replace('ansible_psrp_', '') for v in self.get_option('_extras')
        ])
        unsupported_args = extra_args.difference(supported_args)

        for arg in unsupported_args:
            display.warning("ansible_psrp_%s is unsupported by the current "
                            "psrp version installed" % arg)

        self._psrp_conn_kwargs = dict(
            server=self._psrp_host,
            port=self._psrp_port,
            username=self._psrp_user,
            password=self._psrp_pass,
            ssl=self._psrp_protocol == 'https',
            path=self._psrp_path,
            auth=self._psrp_auth,
            cert_validation=self._psrp_cert_validation,
            connection_timeout=self._psrp_connection_timeout,
            encryption=self._psrp_message_encryption,
            proxy=self._psrp_proxy,
            no_proxy=self._psrp_ignore_proxy,
            max_envelope_size=self._psrp_max_envelope_size,
            operation_timeout=self._psrp_operation_timeout,
            certificate_key_pem=self._psrp_certificate_key_pem,
            certificate_pem=self._psrp_certificate_pem,
            credssp_auth_mechanism=self._psrp_credssp_auth_mechanism,
            credssp_disable_tlsv1_2=self._psrp_credssp_disable_tlsv1_2,
            credssp_minimum_version=self._psrp_credssp_minimum_version,
            negotiate_send_cbt=self._psrp_negotiate_send_cbt,
            negotiate_delegate=self._psrp_negotiate_delegate,
            negotiate_hostname_override=self._psrp_negotiate_hostname_override,
            negotiate_service=self._psrp_negotiate_service,
        )

        # Check if PSRP version supports newer read_timeout argument (needs pypsrp 0.3.0+)
        if hasattr(pypsrp, 'FEATURES') and 'wsman_read_timeout' in pypsrp.FEATURES:
            self._psrp_conn_kwargs['read_timeout'] = self._psrp_read_timeout
        elif self._psrp_read_timeout is not None:
            display.warning(
                "ansible_psrp_read_timeout is unsupported by the current psrp version installed, "
                "using ansible_psrp_connection_timeout value for read_timeout instead."
            )

        # Check if PSRP version supports newer reconnection_retries argument (needs pypsrp 0.3.0+)
        if hasattr(pypsrp, 'FEATURES') and 'wsman_reconnections' in pypsrp.FEATURES:
            self._psrp_conn_kwargs['reconnection_retries'] = self._psrp_reconnection_retries
            self._psrp_conn_kwargs['reconnection_backoff'] = self._psrp_reconnection_backoff
        else:
            if self._psrp_reconnection_retries is not None:
                display.warning(
                    "ansible_psrp_reconnection_retries is unsupported by the current psrp version installed."
                )
            if self._psrp_reconnection_backoff is not None:
                display.warning(
                    "ansible_psrp_reconnection_backoff is unsupported by the current psrp version installed."
                )

        # add in the extra args that were set
        for arg in extra_args.intersection(supported_args):
            option = self.get_option('_extras')['ansible_psrp_%s' % arg]
            self._psrp_conn_kwargs[arg] = option
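
The protocol/port negotiation at the top of _build_kwargs is easy to misread; here is the same rule restated as a standalone sketch (pure Python, no pypsrp required):

def resolve_endpoint(protocol=None, port=None):
    # Neither given: default to HTTPS on the WinRM SSL port.
    if protocol is None and port is None:
        return 'https', 5986
    # Only the port given: 5985 is the conventional plaintext port.
    if protocol is None:
        return ('https' if int(port) != 5985 else 'http'), int(port)
    # Only the protocol given: pick the matching conventional port.
    if port is None:
        return protocol, 5986 if protocol == 'https' else 5985
    return protocol, int(port)


assert resolve_endpoint() == ('https', 5986)
assert resolve_endpoint(port=5985) == ('http', 5985)
assert resolve_endpoint(protocol='http') == ('http', 5985)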
Exemplo n.º 53
0
def _list_into_cache(regions):
    groups = collections.defaultdict(list)
    hostvars = collections.defaultdict(dict)
    images = {}
    cbs_attachments = collections.defaultdict(dict)

    prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')

    try:
        # Ansible 2.3+
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public', value_type='list')
    except TypeError:
        # Ansible 2.2.x and below
        # pylint: disable=unexpected-keyword-arg
        networks = get_config(p, 'rax', 'access_network',
                              'RAX_ACCESS_NETWORK', 'public', islist=True)
    try:
        try:
            # Ansible 2.3+
            ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                              'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
        except TypeError:
            # Ansible 2.2.x and below
            # pylint: disable=unexpected-keyword-arg
            ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                              'RAX_ACCESS_IP_VERSION', 4, islist=True))
    except Exception:
        ip_versions = [4]
    else:
        ip_versions = [v for v in ip_versions if v in [4, 6]]
        if not ip_versions:
            ip_versions = [4]

    # Go through all the regions looking for servers
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        if cs is None:
            warnings.warn(
                'Connecting to Rackspace region "%s" has caused Pyrax to '
                'return None. Is this a valid region?' % region,
                RuntimeWarning)
            continue
        for server in cs.servers.list():
            # Create a group on region
            groups[region].append(server.name)

            # Check if group metadata key in servers' metadata
            group = server.metadata.get('group')
            if group:
                groups[group].append(server.name)

            for extra_group in server.metadata.get('groups', '').split(','):
                if extra_group:
                    groups[extra_group].append(server.name)

            # Add host metadata
            for key, value in to_dict(server).items():
                hostvars[server.name][key] = value

            hostvars[server.name]['rax_region'] = region

            for key, value in iteritems(server.metadata):
                groups['%s_%s_%s' % (prefix, key, value)].append(server.name)

            groups['instance-%s' % server.id].append(server.name)
            groups['flavor-%s' % server.flavor['id']].append(server.name)

            # Handle boot from volume
            if not server.image:
                if not cbs_attachments[region]:
                    cbs = pyrax.connect_to_cloud_blockstorage(region)
                    for vol in cbs.list():
                        if boolean(vol.bootable, strict=False):
                            for attachment in vol.attachments:
                                metadata = vol.volume_image_metadata
                                server_id = attachment['server_id']
                                cbs_attachments[region][server_id] = {
                                    'id': metadata['image_id'],
                                    'name': slugify(metadata['image_name'])
                                }
                image = cbs_attachments[region].get(server.id)
                if image:
                    server.image = {'id': image['id']}
                    hostvars[server.name]['rax_image'] = server.image
                    hostvars[server.name]['rax_boot_source'] = 'volume'
                    images[image['id']] = image['name']
            else:
                hostvars[server.name]['rax_boot_source'] = 'local'

            try:
                imagegroup = 'image-%s' % images[server.image['id']]
                groups[imagegroup].append(server.name)
                groups['image-%s' % server.image['id']].append(server.name)
            except KeyError:
                try:
                    image = cs.images.get(server.image['id'])
                except cs.exceptions.NotFound:
                    groups['image-%s' % server.image['id']].append(server.name)
                else:
                    images[image.id] = image.human_id
                    groups['image-%s' % image.human_id].append(server.name)
                    groups['image-%s' % server.image['id']].append(server.name)

            # And finally, add an IP address
            ansible_ssh_host = None
            # use accessIPv[46] instead of looping address for 'public'
            for network_name in networks:
                if ansible_ssh_host:
                    break
                if network_name == 'public':
                    for version_name in ip_versions:
                        if ansible_ssh_host:
                            break
                        if version_name == 6 and server.accessIPv6:
                            ansible_ssh_host = server.accessIPv6
                        elif server.accessIPv4:
                            ansible_ssh_host = server.accessIPv4
                if not ansible_ssh_host:
                    addresses = server.addresses.get(network_name, [])
                    for address in addresses:
                        for version_name in ip_versions:
                            if ansible_ssh_host:
                                break
                            if address.get('version') == version_name:
                                ansible_ssh_host = address.get('addr')
                                break
            if ansible_ssh_host:
                hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host

    if hostvars:
        groups['_meta'] = {'hostvars': hostvars}

    with open(get_cache_file_path(regions), 'w') as cache_file:
        json.dump(groups, cache_file)
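
For context, the JSON written here follows the standard Ansible dynamic-inventory contract: every key is a group name mapping to a list of hosts, except the special _meta key, which carries per-host variables. A minimal sketch of the resulting shape (values illustrative):

# Illustrative shape of the cached inventory document.
cached_inventory = {
    'DFW': ['web01'],                    # one group per region
    'flavor-general1-1': ['web01'],      # one group per flavor
    'instance-abc123': ['web01'],        # one group per instance id
    '_meta': {
        'hostvars': {
            'web01': {
                'rax_region': 'DFW',
                'rax_boot_source': 'local',
                'ansible_ssh_host': '10.0.0.5',
            },
        },
    },
}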
Exemplo n.º 54
0
def mk_boolean(value):
    ''' moved to module_utils'''
    _deprecated(
        'ansible.constants.mk_boolean() is deprecated.  Use ansible.module_utils.parsing.convert_bool.boolean() instead'
    )
    return boolean(value, strict=False)
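
The migration named in the deprecation message is a drop-in replacement; a minimal before/after sketch:

# Before (deprecated):
#   from ansible import constants as C
#   C.mk_boolean('yes')

# After:
from ansible.module_utils.parsing.convert_bool import boolean

boolean('yes', strict=False)  # True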
Exemplo n.º 55
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            boot=dict(type='bool', default=True),
            dump=dict(type='str', default='0'),
            fstab=dict(type='str'),
            fstype=dict(type='str'),
            path=dict(type='path', required=True, aliases=['name']),
            opts=dict(type='str'),
            passno=dict(type='str', no_log=False, default='0'),
            src=dict(type='path'),
            backup=dict(type='bool', default=False),
            state=dict(type='str',
                       required=True,
                       choices=[
                           'absent', 'mounted', 'present', 'unmounted',
                           'remounted'
                       ]),
        ),
        supports_check_mode=True,
        required_if=(
            ['state', 'mounted', ['src', 'fstype']],
            ['state', 'present', ['src', 'fstype']],
        ),
    )

    # solaris args:
    #   name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab
    # linux args:
    #   name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
    # Note: Do not modify module.params['fstab'] as we need to know if the user
    # explicitly specified it in mount() and remount()
    if platform.system().lower() == 'sunos':
        args = dict(name=module.params['path'],
                    opts='-',
                    passno='-',
                    fstab=module.params['fstab'],
                    boot='yes' if module.params['boot'] else 'no',
                    warnings=[])
        if args['fstab'] is None:
            args['fstab'] = '/etc/vfstab'
    else:
        args = dict(name=module.params['path'],
                    opts='defaults',
                    dump='0',
                    passno='0',
                    fstab=module.params['fstab'],
                    boot='yes',
                    warnings=[])
        if args['fstab'] is None:
            args['fstab'] = '/etc/fstab'

        # FreeBSD doesn't have any 'default' so set 'rw' instead
        if platform.system() == 'FreeBSD':
            args['opts'] = 'rw'

    args['backup_file'] = ""
    linux_mounts = []

    # Cache all mounts here so that we get consistent results if we need to
    # call is_bind_mounted() multiple times
    if platform.system() == 'Linux':
        linux_mounts = get_linux_mounts(module)

        if linux_mounts is None:
            args['warnings'].append('Cannot open file /proc/self/mountinfo.'
                                    ' Bind mounts might be misinterpreted.')

    # Override defaults with user specified params
    for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'):
        if module.params[key] is not None:
            args[key] = module.params[key]
    if platform.system().lower() == 'linux' or platform.system().lower(
    ).endswith('bsd'):
        # Linux, FreeBSD, NetBSD and OpenBSD use the 'noauto' mount option to
        # control mounting at boot.  To avoid mount option conflicts, the
        # mount module ignores 'boot' if 'noauto' is specified in 'opts'.
        opts = args['opts'].split(',')
        if 'noauto' in opts:
            args['warnings'].append(
                "Ignoring 'boot' because 'opts' contains 'noauto'.")
        elif not module.params['boot']:
            args['boot'] = 'no'
            if 'defaults' in opts:
                args['warnings'].append(
                    "Ignoring 'boot' because 'opts' contains 'defaults'.")
            else:
                opts.append('noauto')
                args['opts'] = ','.join(opts)

    # If fstab file does not exist, we first need to create it. This mainly
    # happens when fstab option is passed to the module.
    if not os.path.exists(args['fstab']):
        if not os.path.exists(os.path.dirname(args['fstab'])):
            os.makedirs(os.path.dirname(args['fstab']))
        try:
            open(args['fstab'], 'a').close()
        except PermissionError:
            module.fail_json(msg="Failed to open %s due to a permission issue" %
                             args['fstab'])
        except Exception as e:
            module.fail_json(msg="Failed to open %s due to %s" %
                             (args['fstab'], to_native(e)))

    # absent:
    #   Remove from fstab and unmounted.
    # unmounted:
    #   Do not change fstab state, but unmount.
    # present:
    #   Add to fstab, do not change mount state.
    # mounted:
    #   Add to fstab if not there and make sure it is mounted. If it has
    #   changed in fstab then remount it.

    state = module.params['state']
    name = module.params['path']
    changed = False

    if state == 'absent':
        name, changed = unset_mount(module, args)

        if changed and not module.check_mode:
            if ismount(name) or is_bind_mounted(module, linux_mounts, name):
                res, msg = umount(module, name)

                if res:
                    module.fail_json(msg="Error unmounting %s: %s" %
                                     (name, msg))

            if os.path.exists(name):
                try:
                    os.rmdir(name)
                except (OSError, IOError) as e:
                    module.fail_json(msg="Error rmdir %s: %s" %
                                     (name, to_native(e)))
    elif state == 'unmounted':
        if ismount(name) or is_bind_mounted(module, linux_mounts, name):
            if not module.check_mode:
                res, msg = umount(module, name)

                if res:
                    module.fail_json(msg="Error unmounting %s: %s" %
                                     (name, msg))

            changed = True
    elif state == 'mounted':
        dirs_created = []
        if not os.path.exists(name) and not module.check_mode:
            try:
                # Something like mkdir -p but with the possibility to undo.
                # Based on some copy-paste from the "file" module.
                curpath = ''
                for dirname in name.strip('/').split('/'):
                    curpath = '/'.join([curpath, dirname])
                    # Remove leading slash if we're creating a relative path
                    if not os.path.isabs(name):
                        curpath = curpath.lstrip('/')

                    b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
                    if not os.path.exists(b_curpath):
                        try:
                            os.mkdir(b_curpath)
                            dirs_created.append(b_curpath)
                        except OSError as ex:
                            # Possibly something else created the dir since the os.path.exists
                            # check above. As long as it's a dir, we don't need to error out.
                            if not (ex.errno == errno.EEXIST
                                    and os.path.isdir(b_curpath)):
                                raise

            except (OSError, IOError) as e:
                module.fail_json(msg="Error making dir %s: %s" %
                                 (name, to_native(e)))

        name, backup_lines, changed = _set_mount_save_old(module, args)
        res = 0

        if (ismount(name) or is_bind_mounted(module, linux_mounts, name,
                                             args['src'], args['fstype'])):
            if changed and not module.check_mode:
                res, msg = remount(module, args)
                changed = True
        else:
            changed = True

            if not module.check_mode:
                res, msg = mount(module, args)

        if res:
            # Not restoring fstab after a failed mount was reported as a bug,
            # ansible/ansible#59183
            # A non-working fstab entry may break the system at the reboot,
            # so undo all the changes if possible.
            try:
                write_fstab(module, backup_lines, args['fstab'])
            except Exception:
                pass

            try:
                for dirname in dirs_created[::-1]:
                    os.rmdir(dirname)
            except Exception:
                pass

            module.fail_json(msg="Error mounting %s: %s" % (name, msg))
    elif state == 'present':
        name, changed = set_mount(module, args)
    elif state == 'remounted':
        if not module.check_mode:
            res, msg = remount(module, args)

            if res:
                module.fail_json(msg="Error remounting %s: %s" % (name, msg))

        changed = True
    else:
        module.fail_json(msg='Unexpected position reached')

    # If the managed node is Solaris, convert the boot value type to Boolean
    #  to match the type of return value with the module argument.
    if platform.system().lower() == 'sunos':
        args['boot'] = boolean(args['boot'])
    module.exit_json(changed=changed, **args)
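
The args dict assembled above maps one-to-one onto the classic six-field fstab layout (device, mount point, fstype, options, dump, passno). A small sketch of the Linux entry those defaults produce (values illustrative):

# Illustrative rendering of args into an /etc/fstab line.
args = {
    'src': '/dev/sdb1',
    'name': '/srv/data',
    'fstype': 'ext4',
    'opts': 'defaults,noauto',
    'dump': '0',
    'passno': '0',
}
line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s' % args
print(line)  # /dev/sdb1 /srv/data ext4 defaults,noauto 0 0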
Exemplo n.º 56
0
    def run(self, tmp=None, task_vars=None):
        ''' handler for unarchive operations '''
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        remote_src = boolean(self._task.args.get('remote_src', False),
                             strict=False)
        creates = self._task.args.get('creates', None)
        decrypt = self._task.args.get('decrypt', True)

        # "copy" is deprecated in favor of "remote_src".
        if 'copy' in self._task.args:
            # They are mutually exclusive.
            if 'remote_src' in self._task.args:
                result['failed'] = True
                result['msg'] = "parameters are mutually exclusive: ('copy', 'remote_src')"
                return result
            # We will take the information from copy and store it in
            # the remote_src var to use later in this file.
            self._task.args['remote_src'] = remote_src = not boolean(
                self._task.args.pop('copy'), strict=False)

        if source is None or dest is None:
            result['failed'] = True
            result['msg'] = "src (or content) and dest are required"
            return result

        if not tmp:
            tmp = self._make_tmp_path()

        if creates:
            # do not run the module if 'creates' is set and the file already
            # exists; this allows idempotent executions.
            creates = self._remote_expand_user(creates)
            if self._remote_file_exists(creates):
                result['skipped'] = True
                result['msg'] = "skipped, since %s exists" % creates
                self._remove_tmp_path(tmp)
                return result

        dest = self._remote_expand_user(dest)  # CCTODO: Fix path for Windows hosts.
        source = os.path.expanduser(source)

        if not remote_src:
            try:
                source = self._loader.get_real_file(
                    self._find_needle('files', source), decrypt=decrypt)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_text(e)
                self._remove_tmp_path(tmp)
                return result

        try:
            remote_stat = self._execute_remote_stat(dest,
                                                    all_vars=task_vars,
                                                    follow=True)
        except AnsibleError as e:
            result['failed'] = True
            result['msg'] = to_text(e)
            self._remove_tmp_path(tmp)
            return result

        if not remote_stat['exists'] or not remote_stat['isdir']:
            result['failed'] = True
            result['msg'] = "dest '%s' must be an existing dir" % dest
            self._remove_tmp_path(tmp)
            return result

        if not remote_src:
            # transfer the file to a remote tmp location
            tmp_src = self._connection._shell.join_path(tmp, 'source')
            self._transfer_file(source, tmp_src)

        # handle diff mode client side
        # handle check mode client side

        if not remote_src:
            # fix file permissions when the copy is done as a different user
            self._fixup_perms2((tmp, tmp_src))
            # Build temporary module_args.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=tmp_src,
                    original_basename=os.path.basename(source),
                )
            )

        else:
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(original_basename=os.path.basename(source))
            )

        # remove action plugin only key
        for key in ('decrypt', ):
            if key in new_module_args:
                del new_module_args[key]

        # execute the unarchive module now, with the updated args
        result.update(
            self._execute_module(module_args=new_module_args,
                                 task_vars=task_vars))
        self._remove_tmp_path(tmp)
        return result
Exemplo n.º 57
0
    def run(self, tmp=None, task_vars=None):
        
        # Call ActionBase.run first
        if task_vars is None:
            task_vars = dict()
        result = super(ActionModule, self).run(tmp, task_vars)

        # Fetch the task arguments
        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        sub_str = self._task.args.get('sub_str', None)
        remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
        mode = self._task.args.get('mode', 0o755)  # 0755 is a syntax error on Python 3
        if not os.path.isfile(source) and not remote_src:
            result['failed'] = True
            result['msg'] = u'source file does not exist'
            return result
        try:
            tmp_source = self._loader.get_real_file(source)
        except AnsibleFileNotFound as e:
            result['failed'] = True
            result['msg'] = "could not find src=%s, %s" % (source, e)
            self._remove_tmp_path(tmp)
            return result
        try:
            if sub_str:
                if isinstance(sub_str, dict):
                    str_dic = sub_str
                else:
                    # sub_str arrived as a string; evaluate it into a dict.
                    # Note: eval() on task input is unsafe; ast.literal_eval
                    # would be a safer choice here.
                    str_dic = eval(sub_str)
                with open(tmp_source, 'r') as f:
                    will_sub = f.read()
                    for pattern in str_dic:
                        will_sub = re.sub(pattern, str_dic[pattern], will_sub)
                    template_data = to_text(will_sub)
            else:
                with open(tmp_source, 'r') as f:
                    template_data = to_text(f.read())

            resultant = self._templar.do_template(template_data, preserve_trailing_newlines=True, escape_backslashes=False)
        except Exception as e:
            result['failed'] = True
            result['msg'] = "%s: %s" % (type(e).__name__, to_text(e))
            return result
        finally:
            self._loader.cleanup_tmp_file(tmp_source)
        new_task = self._task.copy()
        new_task.args.pop('sub_str', None)
        if not remote_src:
            try:
                tempdir = tempfile.mkdtemp()
                result_file = os.path.join(tempdir, os.path.basename(source))
                with open(result_file, 'wb') as f:
                    f.write(to_bytes(resultant, errors='surrogate_or_strict'))
                new_task.args.update(
                    dict(
                        src=result_file,
                        dest=dest,
                        follow=False,
                        mode=mode,
                    ),
                )
                copy_action = self._shared_loader_obj.action_loader.get('copy',
                                                                    task=new_task,
                                                                    connection=self._connection,
                                                                    play_context=self._play_context,
                                                                    loader=self._loader,
                                                                    templar=self._templar,
                                                                    shared_loader_obj=self._shared_loader_obj)
                result.update(copy_action.run(task_vars=task_vars))
            finally:
                shutil.rmtree(tempdir)
            # Return outside the finally block so exceptions are not swallowed.
            return result
        else:
            result.update(
                self._execute_module(
                    module_name='copy',
                    module_args=dict(
                        src=source,
                        dest=dest,
                        original_basename=os.path.basename(source),
                        follow=True,
                        remote_src=True,
                        mode=mode,
                    ),
                    task_vars=task_vars,
                    tmp=tmp,
                    delete_remote_tmp=False,
                )
            )
            return result
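
The sub_str handling above boils down to applying a dict of regex-to-replacement pairs before templating; the same step in isolation (placeholder names hypothetical):

import re

# Hypothetical substitution table; keys are regex patterns, per the plugin.
sub_str = {r'__HOST__': 'db01', r'__PORT__': '8080'}
text = 'listen __HOST__:__PORT__'
for pattern, replacement in sub_str.items():
    text = re.sub(pattern, replacement, text)
print(text)  # listen db01:8080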
Exemplo n.º 58
0
    def run(self, tmp=None, task_vars=None):

        self._supports_check_mode = False

        result = super(ActionModule, self).run(tmp, task_vars)

        if task_vars is None:
            task_vars = dict()

        src = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        delimiter = self._task.args.get('delimiter', None)
        remote_src = self._task.args.get('remote_src', 'yes')
        regexp = self._task.args.get('regexp', None)
        follow = self._task.args.get('follow', False)
        ignore_hidden = self._task.args.get('ignore_hidden', False)
        decrypt = self._task.args.get('decrypt', True)

        if src is None or dest is None:
            result['failed'] = True
            result['msg'] = "src and dest are required"
            return result

        if boolean(remote_src, strict=False):
            result.update(self._execute_module(tmp=tmp, task_vars=task_vars))
            return result
        else:
            try:
                src = self._find_needle('files', src)
            except AnsibleError as e:
                result['failed'] = True
                result['msg'] = to_native(e)
                return result

        if not tmp:
            tmp = self._make_tmp_path()

        if not os.path.isdir(src):
            result['failed'] = True
            result['msg'] = u"Source (%s) is not a directory" % src
            return result

        _re = None
        if regexp is not None:
            _re = re.compile(regexp)

        # Does all work assembling the file
        path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)

        path_checksum = checksum_s(path)
        dest = self._remote_expand_user(dest)
        dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow, tmp=tmp)

        diff = {}

        # setup args for running modules
        new_module_args = self._task.args.copy()

        # clean assemble specific options
        for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
            if opt in new_module_args:
                del new_module_args[opt]

        new_module_args.update(
            dict(
                dest=dest,
                original_basename=os.path.basename(src),
            )
        )

        if path_checksum != dest_stat['checksum']:

            if self._play_context.diff:
                diff = self._get_diff_data(dest, path, task_vars)

            remote_path = self._connection._shell.join_path(tmp, 'src')
            xfered = self._transfer_file(path, remote_path)

            # fix file permissions when the copy is done as a different user
            self._fixup_perms2((tmp, remote_path))

            new_module_args.update(dict(src=xfered,))

            res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)
            if diff:
                res['diff'] = diff
            result.update(res)
        else:
            result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))

        self._remove_tmp_path(tmp)

        return result
Exemplo n.º 59
0
def ensure_type(value, value_type):
    ''' return a configuration variable with casting
    :arg value: The value to ensure correct typing of
    :kwarg value_type: The type of the value.  This can be any of the following strings:
        :boolean: sets the value to a True or False value
        :integer: Sets the value to an integer or raises a ValueError
        :float: Sets the value to a float or raises a ValueError
        :list: Treats the value as a comma separated list.  Split the value
            and return it as a python list.
        :none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
        :tmppath: Create a unique temporary directory inside of the directory
            specified by value and return its path.
        :pathspec: Treat the value as a typical PATH string.  (On POSIX, this
            means colon separated strings.)  Split the value and then expand
            each part for environment variables and tildes.
        :pathlist: Treat the value as a comma separated list of paths.  Split
            the value and then expand each part for environment variables and
            tildes.
    '''
    if value_type:
        value_type = value_type.lower()

    if value_type in ('boolean', 'bool'):
        value = boolean(value, strict=False)

    elif value:
        if value_type in ('integer', 'int'):
            value = int(value)

        elif value_type == 'float':
            value = float(value)

        elif value_type == 'list':
            if isinstance(value, string_types):
                value = [x.strip() for x in value.split(',')]

        elif value_type == 'none':
            if value == "None":
                value = None

        elif value_type == 'path':
            value = resolve_path(value)

        elif value_type in ('tmp', 'temppath', 'tmppath'):
            value = resolve_path(value)
            if not os.path.exists(value):
                makedirs_safe(value, 0o700)
            prefix = 'ansible-local-%s' % os.getpid()
            value = tempfile.mkdtemp(prefix=prefix, dir=value)

        elif value_type == 'pathspec':
            if isinstance(value, string_types):
                value = value.split(os.pathsep)
            value = [resolve_path(x) for x in value]

        elif value_type == 'pathlist':
            if isinstance(value, string_types):
                value = value.split(',')
            value = [resolve_path(x) for x in value]

        # defaults to string types
        elif isinstance(value, string_types):
            value = unquote(value)

    return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
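
A few representative calls, following directly from the branches above (illustrative only):

ensure_type('yes', 'bool')       # -> True
ensure_type('42', 'integer')     # -> 42
ensure_type('a, b ,c', 'list')   # -> ['a', 'b', 'c']
ensure_type('None', 'none')      # -> None
ensure_type('plain', None)       # -> u'plain' (string passthrough)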
Exemplo n.º 60
0
    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.
        '''

        result = 0
        entrylist = []
        entry = {}
        try:
            # preload become/connection/shell to set config defs cached
            list(connection_loader.all(class_only=True))
            list(shell_loader.all(class_only=True))
            list(become_loader.all(class_only=True))

            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
                # FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))

                if self._tqm is None:  # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []
                else:
                    # make sure the tqm has callbacks loaded
                    self._tqm.load_callbacks()
                    self._tqm.send_callback('v2_playbook_on_start', pb)

                i = 1
                plays = pb.get_plays()
                display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))

                for play in plays:
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    # Allow variables to be used in vars_prompt fields.
                    all_vars = self._variable_manager.get_vars(play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    setattr(play, 'vars_prompt', templar.template(play.vars_prompt))

                    # FIXME: this should be a play 'sub object' like loop_control
                    if play.vars_prompt:
                        for var in play.vars_prompt:
                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = boolean(var.get("private", True))
                            confirm = boolean(var.get("confirm", False))
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)
                            unsafe = var.get("unsafe", None)

                            if vname not in self._variable_manager.extra_vars:
                                if self._tqm:
                                    self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt,
                                                            default, unsafe)
                                    play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
                                else:  # we are either in --list-<option> or syntax check
                                    play.vars[vname] = default

                    # Post validate so any play level variables are templated
                    all_vars = self._variable_manager.get_vars(play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)
                    play.post_validate(templar)

                    if context.CLIARGS['syntax']:
                        continue

                    if self._tqm is None:
                        # we are just doing a listing
                        entry['plays'].append(play)

                    else:
                        self._tqm._unreachable_hosts.update(self._unreachable_hosts)

                        previously_failed = len(self._tqm._failed_hosts)
                        previously_unreachable = len(self._tqm._unreachable_hosts)

                        break_play = False
                        # we are actually running plays
                        batches = self._get_serialized_batches(play)
                        if len(batches) == 0:
                            self._tqm.send_callback('v2_playbook_on_play_start', play)
                            self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                        for batch in batches:
                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)
                            # and run it...
                            result = self._tqm.run(play=play)

                            # break the play if the result equals the special return code
                            if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
                                result = self._tqm.RUN_FAILED_HOSTS
                                break_play = True

                            # check the number of failures here, to see if they're above the maximum
                            # failure percentage allowed, or if any errors are fatal. If either of those
                            # conditions are met, we break out, otherwise we only break out if the entire
                            # batch failed
                            failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
                                (previously_failed + previously_unreachable)

                            if len(batch) == failed_hosts_count:
                                break_play = True
                                break

                            # update the previous counts so they don't accumulate incorrectly
                            # over multiple serial batches
                            previously_failed += len(self._tqm._failed_hosts) - previously_failed
                            previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable

                            # save the unreachable hosts from this batch
                            self._unreachable_hosts.update(self._tqm._unreachable_hosts)

                        if break_play:
                            break

                    i = i + 1  # per play

                if entry:
                    entrylist.append(entry)  # per playbook

                # send the stats callback for this playbook
                if self._tqm is not None:
                    if C.RETRY_FILES_ENABLED:
                        retries = set(self._tqm._failed_hosts.keys())
                        retries.update(self._tqm._unreachable_hosts.keys())
                        retries = sorted(retries)
                        if len(retries) > 0:
                            if C.RETRY_FILES_SAVE_PATH:
                                basedir = C.RETRY_FILES_SAVE_PATH
                            elif playbook_path:
                                basedir = os.path.dirname(os.path.abspath(playbook_path))
                            else:
                                basedir = '~/'

                            (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
                            filename = os.path.join(basedir, "%s.retry" % retry_name)
                            if self._generate_retry_inventory(filename, retries):
                                display.display("\tto retry, use: --limit @%s\n" % filename)

                    self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

                # if the last result wasn't zero, break out of the playbook file name loop
                if result != 0:
                    break

            if entrylist:
                return entrylist

        finally:
            if self._tqm is not None:
                self._tqm.cleanup()
            if self._loader:
                self._loader.cleanup_all_tmp_files()

        if context.CLIARGS['syntax']:
            display.display("No issues encountered")
            return result

        if context.CLIARGS['start_at_task'] and not self._tqm._start_at_done:
            display.error(
                "No matching task \"%s\" found."
                " Note: --start-at-task can only follow static includes."
                % context.CLIARGS['start_at_task']
            )

        return result
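
The result & self._tqm.RUN_FAILED_BREAK_PLAY test above works because TaskQueueManager reports run results as bit flags; a small standalone sketch of that convention (flag values assumed to match Ansible's task_queue_manager):

# Assumed TaskQueueManager result flags.
RUN_OK = 0
RUN_ERROR = 1
RUN_FAILED_HOSTS = 2
RUN_UNREACHABLE_HOSTS = 4
RUN_FAILED_BREAK_PLAY = 8

result = RUN_FAILED_HOSTS | RUN_FAILED_BREAK_PLAY

# The check used in run(): strip the break-play marker, keep the failure.
if result & RUN_FAILED_BREAK_PLAY != 0:
    result = RUN_FAILED_HOSTS

assert result == RUN_FAILED_HOSTS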