Example No. 1
    def test_basic_error(self):
        e = AnsibleError(self.message)
        self.assertEqual(e.message, 'ERROR! ' + self.message)
        self.assertEqual(repr(e), 'ERROR! ' + self.message)
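
The test pins down the one behaviour callers rely on: AnsibleError prefixes its message with 'ERROR! '. A minimal, hypothetical usage sketch assuming only that behaviour:

# Hypothetical sketch: raising and catching AnsibleError; the 'ERROR! '
# prefix on e.message is the behaviour the test above asserts.
from ansible.errors import AnsibleError

try:
    raise AnsibleError("something went wrong")
except AnsibleError as e:
    print(e.message)  # "ERROR! something went wrong"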
Example No. 2
    def _process_pending_results(self, iterator, one_pass=False):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get()
                display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))

                # helper method, used to find the original host from the one
                # returned in the result/message, which has been serialized and
                # thus had some information stripped from it to speed up the
                # serialization process
                def get_original_host(host):
                    if host.name in self._inventory._hosts_cache:
                        return self._inventory._hosts_cache[host.name]
                    else:
                        return self._inventory.get_host(host.name)

                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = get_original_host(task_result._host)
                    task = task_result._task
                    if result[0] == 'host_task_failed' or task_result.is_failed():
                        if not task.ignore_errors:
                            display.debug("marking %s as failed" % host.name)
                            if task.run_once:
                                # if we're using run_once, we have to fail every host here
                                for h in self._inventory.get_hosts(iterator._play.hosts):
                                    if h.name not in self._tqm._unreachable_hosts:
                                        iterator.mark_host_failed(h)
                            else:
                                iterator.mark_host_failed(host)

                            # only add the host to the failed list officially if it has
                            # been failed by the iterator
                            if iterator.is_failed(host):
                                self._tqm._failed_hosts[host.name] = True
                                self._tqm._stats.increment('failures', host.name)
                        else:
                            self._tqm._stats.increment('ok', host.name)
                        self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped', task_result)
                    elif result[0] == 'host_task_ok':
                        if task.action != 'include':
                            self._tqm._stats.increment('ok', host.name)
                            if 'changed' in task_result._result and task_result._result['changed']:
                                self._tqm._stats.increment('changed', host.name)
                            self._tqm.send_callback('v2_runner_on_ok', task_result)

                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff', task_result)

                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]

                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[task_result._task._role._role_name]):
                            if role_obj._uuid == task_result._task._role._uuid:
                                role_obj._had_task_run[host.name] = True

                    ret_results.append(task_result)

                elif result[0] == 'add_host':
                    result_item = result[1]
                    new_host_info = result_item.get('add_host', dict())

                    self._add_host(new_host_info, iterator)

                elif result[0] == 'add_group':
                    host = get_original_host(result[1])
                    result_item = result[2]
                    self._add_group(host, result_item)

                elif result[0] == 'notify_handler':
                    task_result  = result[1]
                    handler_name = result[2]

                    original_host = get_original_host(task_result._host)
                    original_task = iterator.get_original_task(original_host, task_result._task)
                    if handler_name not in self._notified_handlers:
                        self._notified_handlers[handler_name] = []

                    if original_host not in self._notified_handlers[handler_name]:
                        self._notified_handlers[handler_name].append(original_host)
                        display.vv("NOTIFIED HANDLER %s" % (handler_name,))

                elif result[0] == 'register_host_var':
                    # essentially the same as 'set_host_var' below, however we
                    # never follow the delegate_to value for registered vars and
                    # the variable goes in the fact_cache
                    host      = get_original_host(result[1])
                    task      = result[2]
                    var_value = wrap_var(result[3])
                    var_name  = task.register

                    if task.run_once:
                        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                    else:
                        host_list = [host]

                    for target_host in host_list:
                        self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})

                elif result[0] in ('set_host_var', 'set_host_facts'):
                    host = get_original_host(result[1])
                    task = result[2]
                    item = result[3]

                    # find the host we're actually referring to here, which may
                    # be a host that is not really in inventory at all
                    if task.delegate_to is not None and task.delegate_facts:
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        if item is not None:
                            task_vars['item'] = item
                        templar = Templar(loader=self._loader, variables=task_vars)
                        host_name = templar.template(task.delegate_to)
                        actual_host = self._inventory.get_host(host_name)
                        if actual_host is None:
                            actual_host = Host(name=host_name)
                    else:
                        actual_host = host

                    if task.run_once:
                        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                    else:
                        host_list = [actual_host]

                    if result[0] == 'set_host_var':
                        var_name  = result[4]
                        var_value = result[5]
                        for target_host in host_list:
                            self._variable_manager.set_host_variable(target_host, var_name, var_value)
                    elif result[0] == 'set_host_facts':
                        facts = result[4]
                        for target_host in host_list:
                            if task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(target_host, facts)
                            else:
                                self._variable_manager.set_host_facts(target_host, facts)
                elif result[0].startswith('v2_runner_item') or result[0] == 'v2_runner_retry':
                    self._tqm.send_callback(result[0], result[1])
                elif result[0] == 'v2_on_file_diff':
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', result[1])
                else:
                    raise AnsibleError("unknown result message received: %s" % result[0])

            except Queue.Empty:
                time.sleep(0.0001)

            if one_pass:
                break

        return ret_results
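
Each queue message is a tuple whose first element names the action, which the long if/elif chain above dispatches on. A self-contained sketch of that tagged-tuple pattern, with hypothetical message names and payloads:

# Minimal sketch of the tagged-tuple dispatch used above; the message
# names and payloads here are hypothetical stand-ins.
from queue import Queue, Empty

q = Queue()
q.put(('host_task_ok', {'host': 'web1'}))
q.put(('add_host', {'name': 'web2'}))

while not q.empty():
    try:
        msg = q.get(block=False)
    except Empty:
        break
    if msg[0] == 'host_task_ok':
        print("ok:", msg[1]['host'])
    elif msg[0] == 'add_host':
        print("adding:", msg[1]['name'])
    else:
        raise ValueError("unknown result message: %s" % msg[0])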
Example No. 3
    def start_session(self):
        ''' start ssm session '''

        if self.get_option('instance_id') is None:
            self.instance_id = self.host
        else:
            self.instance_id = self.get_option('instance_id')

        display.vvv(u"ESTABLISH SSM CONNECTION TO: {0}".format(
            self.instance_id),
                    host=self.host)

        executable = self.get_option('plugin')
        if not os.path.exists(
                to_bytes(executable, errors='surrogate_or_strict')):
            raise AnsibleError(
                "failed to find the executable specified %s."
                " Please verify if the executable exists and re-try." %
                executable)

        profile_name = ''
        region_name = self.get_option('region')
        ssm_parameters = dict()

        client = boto3.client('ssm', region_name=region_name)
        self._client = client
        response = client.start_session(Target=self.instance_id,
                                        Parameters=ssm_parameters)
        self._session_id = response['SessionId']

        cmd = [
            executable,
            json.dumps(response), region_name, "StartSession", profile_name,
            json.dumps({"Target": self.instance_id}), client.meta.endpoint_url
        ]

        display.vvvv(u"SSM COMMAND: {0}".format(to_text(cmd)), host=self.host)

        stdout_r, stdout_w = pty.openpty()
        session = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=stdout_w,
            stderr=subprocess.PIPE,
            close_fds=True,
            bufsize=0,
        )

        os.close(stdout_w)
        self._stdout = os.fdopen(stdout_r, 'rb', 0)
        self._session = session
        self._poll_stdout = select.poll()
        self._poll_stdout.register(self._stdout, select.POLLIN)

        # Disable command echo and prompt.
        self._prepare_terminal()

        display.vvv(u"SSM CONNECTION ID: {0}".format(self._session_id),
                    host=self.host)

        return session
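
Stripped of the plugin machinery, the AWS side is two boto3 calls. A hedged sketch (the region and instance id are placeholders, and valid AWS credentials plus an SSM-managed instance are assumed):

# Hedged sketch of the boto3 calls the plugin wraps; instance id and
# region are placeholders.
import boto3

client = boto3.client('ssm', region_name='us-east-1')
response = client.start_session(Target='i-0123456789abcdef0')
print(response['SessionId'])

# end the session once finished with it
client.terminate_session(SessionId=response['SessionId'])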
Example No. 4
    def _winrm_connect(self):
        '''
        Establish a WinRM connection over HTTP/HTTPS.
        '''
        display.vvv(
            "ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
            (self._winrm_user, self._winrm_port, self._winrm_host),
            host=self._winrm_host)

        winrm_host = self._winrm_host
        if HAS_IPADDRESS:
            display.vvvv("checking if winrm_host %s is an IPv6 address" %
                         winrm_host)
            try:
                ipaddress.IPv6Address(winrm_host)
            except ipaddress.AddressValueError:
                pass
            else:
                winrm_host = "[%s]" % winrm_host

        netloc = '%s:%d' % (winrm_host, self._winrm_port)
        endpoint = urlunsplit(
            (self._winrm_scheme, netloc, self._winrm_path, '', ''))
        errors = []
        for transport in self._winrm_transport:
            if transport == 'kerberos':
                if not HAVE_KERBEROS:
                    errors.append(
                        'kerberos: the python kerberos library is not installed'
                    )
                    continue
                if self._kerb_managed:
                    self._kerb_auth(self._winrm_user, self._winrm_pass)
            display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' %
                          (transport, endpoint),
                          host=self._winrm_host)
            try:
                winrm_kwargs = self._winrm_kwargs.copy()
                if self._winrm_connection_timeout:
                    winrm_kwargs[
                        'operation_timeout_sec'] = self._winrm_connection_timeout
                    winrm_kwargs[
                        'read_timeout_sec'] = self._winrm_connection_timeout + 1
                protocol = Protocol(endpoint,
                                    transport=transport,
                                    **winrm_kwargs)

                # open the shell from connect so we know we're able to talk to the server
                if not self.shell_id:
                    self.shell_id = protocol.open_shell(
                        codepage=65001)  # UTF-8
                    display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id,
                                  host=self._winrm_host)

                return protocol
            except Exception as e:
                err_msg = to_text(e).strip()
                if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg,
                             re.I):
                    raise AnsibleError('the connection attempt timed out')
                m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
                if m:
                    code = int(m.groups()[0])
                    if code == 401:
                        err_msg = 'the specified credentials were rejected by the server'
                    elif code == 411:
                        return protocol
                errors.append(u'%s: %s' % (transport, err_msg))
                display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' %
                              (err_msg, to_text(traceback.format_exc())),
                              host=self._winrm_host)
        if errors:
            raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
        else:
            raise AnsibleError('No transport found for WinRM connection')
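
Underneath, the method drives pywinrm's Protocol class. A hedged standalone sketch of that handshake (endpoint, transport, and credentials are placeholders):

# Hedged sketch of the pywinrm Protocol handshake performed above;
# endpoint, transport and credentials are placeholders.
from winrm.protocol import Protocol

protocol = Protocol(
    endpoint='https://windows-host:5986/wsman',
    transport='ntlm',
    username='Administrator',
    password='secret',
)
shell_id = protocol.open_shell(codepage=65001)  # UTF-8, as above
command_id = protocol.run_command(shell_id, 'ipconfig', ['/all'])
std_out, std_err, status_code = protocol.get_command_output(shell_id, command_id)
protocol.cleanup_command(shell_id, command_id)
protocol.close_shell(shell_id)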
Example No. 5
    def put_file(self, in_path, out_path):
        super(Connection, self).put_file(in_path, out_path)
        out_path = self._shell._unquote(out_path)
        display.vvv('PUT "%s" TO "%s"' % (in_path, out_path),
                    host=self._winrm_host)
        if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound('file or module does not exist: "%s"' %
                                      to_native(in_path))

        script_template = u'''
            begin {{
                $path = '{0}'

                $DebugPreference = "Continue"
                $ErrorActionPreference = "Stop"
                Set-StrictMode -Version 2

                $fd = [System.IO.File]::Create($path)

                $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()

                $bytes = @() #initialize for empty file case
            }}
            process {{
               $bytes = [System.Convert]::FromBase64String($input)
               $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
               $fd.Write($bytes, 0, $bytes.Length)
            }}
            end {{
                $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null

                $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()

                $fd.Close()

                Write-Output "{{""sha1"":""$hash""}}"
            }}
        '''

        script = script_template.format(self._shell._escape(out_path))
        cmd_parts = self._shell._encode_script(script,
                                               as_list=True,
                                               strict_mode=False,
                                               preserve_rc=False)

        result = self._winrm_exec(cmd_parts[0],
                                  cmd_parts[1:],
                                  stdin_iterator=self._put_file_stdin_iterator(
                                      in_path, out_path))
        # TODO: improve error handling
        if result.status_code != 0:
            raise AnsibleError(to_native(result.std_err))

        put_output = json.loads(result.std_out)
        remote_sha1 = put_output.get("sha1")

        if not remote_sha1:
            raise AnsibleError("Remote sha1 was not returned")

        local_sha1 = secure_hash(in_path)

        if not remote_sha1 == local_sha1:
            raise AnsibleError(
                "Remote sha1 hash {0} does not match local hash {1}".format(
                    to_native(remote_sha1), to_native(local_sha1)))
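
The PowerShell template streams the upload through SHA-1 so the remote hash can be compared against secure_hash(in_path) locally. A hedged sketch of the equivalent local computation with hashlib (assuming secure_hash defaults to SHA-1, as the comparison implies):

# Hedged sketch of the local-side hash; mirrors the streaming SHA-1 the
# PowerShell template computes on the remote end.
import hashlib

def local_sha1(path, chunk_size=64 * 1024):
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()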
Example No. 6
def process_support_levels(plugin_info, templates, output_dir, plugin_type):
    supported_by = {
        'Ansible Core Team': {
            'slug': 'core_supported',
            'modules': [],
            'output': 'core_maintained.rst',
            'blurb': "These are :doc:`modules maintained by the"
                     " Ansible Core Team<core_maintained>` and will always ship"
                     " with Ansible itself."
        },
        'Ansible Network Team': {
            'slug': 'network_supported',
            'modules': [],
            'output': 'network_maintained.rst',
            'blurb': "These are :doc:`modules maintained by the"
                     " Ansible Network Team<network_maintained>` in"
                     " a relationship similar to how the Ansible Core Team"
                     " maintains the Core modules."
        },
        'Ansible Partners': {
            'slug': 'partner_supported',
            'modules': [],
            'output': 'partner_maintained.rst',
            'blurb': """
Some examples of :doc:`Certified Modules<partner_maintained>` are those submitted by other
companies. Maintainers of these types of modules must watch for any issues reported or pull requests
raised against the module.

The Ansible Core Team will review all modules becoming certified.  Core committers will review
proposed changes to existing Certified Modules once the community maintainers of the module have
approved the changes. Core committers will also ensure that any issues that arise due to Ansible
engine changes will be remediated.  Also, it is strongly recommended (but not presently required)
for these types of modules to have unit tests.

These modules are currently shipped with Ansible, but might be shipped separately in the future.
"""
        },
        'Ansible Community': {
            'slug': 'community_supported',
            'modules': [],
            'output': 'community_maintained.rst',
            'blurb': """
These are :doc:`modules maintained by the Ansible Community<community_maintained>`.  They **are
not** supported by the Ansible Core Team or by companies/partners associated to the module.

They are still fully usable, but the response rate to issues is purely up to the community.  Best
effort support will be provided but is not covered under any support contracts.

These modules are currently shipped with Ansible, but will most likely be shipped separately in the future.
"""
        },
    }

    # only gen support pages for modules for now, need to split and namespace templates and generated docs
    if plugin_type == 'plugins':
        return
    # Separate the modules by support_level
    for module, info in plugin_info.items():
        if not info.get('metadata', None):
            display.warning('no metadata for %s' % module)
            continue
        if info['metadata']['supported_by'] == 'core':
            supported_by['Ansible Core Team']['modules'].append(module)
        elif info['metadata']['supported_by'] == 'network':
            supported_by['Ansible Network Team']['modules'].append(module)
        elif info['metadata']['supported_by'] == 'certified':
            supported_by['Ansible Partners']['modules'].append(module)
        elif info['metadata']['supported_by'] == 'community':
            supported_by['Ansible Community']['modules'].append(module)
        else:
            raise AnsibleError('Unknown supported_by value: %s' %
                               info['metadata']['supported_by'])

    # Render the module lists
    for maintainers, data in supported_by.items():
        template_data = {
            'maintainers': maintainers,
            'modules': data['modules'],
            'slug': data['slug'],
            'module_info': plugin_info,
        }
        text = templates['support_list'].render(template_data)
        write_data(text, output_dir, data['output'])
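
The final loop renders one RST page per support tier. A hedged, self-contained sketch of that render step, assuming the entries in `templates` are Jinja2 templates (as the .render() call suggests); the template body here is invented for illustration:

# Hedged sketch of the render step above; the template body is invented.
from jinja2 import Template

support_list = Template(
    "Modules maintained by {{ maintainers }}:\n"
    "{% for m in modules %}- {{ m }}\n{% endfor %}"
)
print(support_list.render(maintainers='Ansible Core Team',
                          modules=['file', 'copy']))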
Example No. 7
    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        original = conditional
        if conditional is None or conditional == '':
            return True

        # this allows for direct boolean assignments to conditionals "when: False"
        if isinstance(conditional, bool):
            return conditional

        if templar.is_template(conditional):
            display.warning('conditional statements should not include jinja2 '
                            'templating delimiters such as {{ }} or {%% %%}. '
                            'Found: %s' % conditional)

        bare_vars_warning = False
        if C.CONDITIONAL_BARE_VARS:
            if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
                conditional = all_vars[conditional]
                bare_vars_warning = True

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            # if the conditional is "unsafe", disable lookups
            disable_lookups = hasattr(conditional, '__UNSAFE__')
            conditional = templar.template(conditional, disable_lookups=disable_lookups)
            if bare_vars_warning and not isinstance(conditional, bool):
                display.deprecated('evaluating %s as a bare variable, this behaviour will go away and you might need to add |bool'
                                   ' to the expression in the future. Also see CONDITIONAL_BARE_VARS configuration toggle.' % conditional, "2.12")
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # update the lookups flag, as the string returned above may now be unsafe
            # and we don't want future templating calls to do unsafe things
            disable_lookups |= hasattr(conditional, '__UNSAFE__')

            # First, we do some low-level jinja2 parsing involving the AST format of the
            # statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
            class CleansingNodeVisitor(ast.NodeVisitor):
                def generic_visit(self, node, inside_call=False, inside_yield=False):
                    if isinstance(node, ast.Call):
                        inside_call = True
                    elif isinstance(node, ast.Yield):
                        inside_yield = True
                    elif isinstance(node, ast.Str):
                        if disable_lookups:
                            if inside_call and node.s.startswith("__"):
                                # calling things with a dunder is generally bad at this point...
                                raise AnsibleError(
                                    "Invalid access found in the conditional: '%s'" % conditional
                                )
                            elif inside_yield:
                                # we're inside a yield, so recursively parse and traverse the AST
                                # of the result to catch forbidden syntax from executing
                                parsed = ast.parse(node.s, mode='exec')
                                cnv = CleansingNodeVisitor()
                                cnv.visit(parsed)
                    # iterate over all child nodes
                    for child_node in ast.iter_child_nodes(node):
                        self.generic_visit(
                            child_node,
                            inside_call=inside_call,
                            inside_yield=inside_yield
                        )
            try:
                e = templar.environment.overlay()
                e.filters.update(templar._get_filters(e.filters))
                e.tests.update(templar._get_tests())

                res = e._parse(conditional, None, None)
                res = generate(res, e, None, None)
                parsed = ast.parse(res, mode='exec')

                cnv = CleansingNodeVisitor()
                cnv.visit(parsed)
            except Exception as e:
                raise AnsibleError("Invalid conditional detected: %s" % to_native(e))

            # and finally we generate and template the presented string and look at the resulting string
            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            val = templar.template(presented, disable_lookups=disable_lookups).strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" % original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a variable was undefined. If we happened
            # to be looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        if should_exist:
                            return False
                        else:
                            return True
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception:
                raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
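
The heart of the method is the wrap-and-render trick: the conditional is embedded in an if/else template and the rendered output compared against "True"/"False". A hedged standalone sketch of just that trick, with plain Jinja2 standing in for Ansible's Templar (none of the lookup or AST safety checks are reproduced):

# Hedged sketch of the wrap-and-render evaluation above, with plain
# Jinja2 standing in for Templar.
from jinja2 import Environment

def check_conditional(conditional, variables):
    presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
    val = Environment().from_string(presented).render(**variables).strip()
    if val == "True":
        return True
    elif val == "False":
        return False
    raise ValueError("unable to evaluate conditional: %s" % conditional)

print(check_conditional("x > 2", {"x": 5}))  # True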
Example No. 8
    def run(self, terms, variables=None, regions_enabled=True, short_version=None,
            deployment_type=None, **kwargs):

        predicates = []

        if short_version is None or deployment_type is None:
            if 'openshift' not in variables:
                raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")

        if deployment_type is None:
            if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
                raise AnsibleError("This lookup module requires that the deployment_type be set")

            deployment_type = variables['openshift']['common']['deployment_type']

        if short_version is None:
            if 'short_version' in variables['openshift']['common']:
                short_version = variables['openshift']['common']['short_version']
            elif 'openshift_release' in variables:
                release = variables['openshift_release']
                if release.startswith('v'):
                    short_version = release[1:]
                else:
                    short_version = release
                short_version = '.'.join(short_version.split('.')[0:2])
            elif 'openshift_version' in variables:
                version = variables['openshift_version']
                short_version = '.'.join(version.split('.')[0:2])
            else:
                # pylint: disable=line-too-long
                raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
        if deployment_type == 'origin':
            if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', 'latest']:
                raise AnsibleError("Unknown short_version %s" % short_version)
        elif deployment_type == 'openshift-enterprise':
            if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', 'latest']:
                raise AnsibleError("Unknown short_version %s" % short_version)
        else:
            raise AnsibleError("Unknown deployment_type %s" % deployment_type)

        if deployment_type == 'origin':
            # convert origin short_version to enterprise short_version
            short_version = re.sub(r'^1\.', '3.', short_version)

        if short_version == 'latest':
            short_version = '3.8'

        # Predicates ordered according to OpenShift Origin source:
        # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go

        if short_version == '3.1':
            predicates.extend([
                {'name': 'PodFitsHostPorts'},
                {'name': 'PodFitsResources'},
                {'name': 'NoDiskConflict'},
                {'name': 'MatchNodeSelector'},
            ])

        if short_version == '3.2':
            predicates.extend([
                {'name': 'PodFitsHostPorts'},
                {'name': 'PodFitsResources'},
                {'name': 'NoDiskConflict'},
                {'name': 'NoVolumeZoneConflict'},
                {'name': 'MatchNodeSelector'},
                {'name': 'MaxEBSVolumeCount'},
                {'name': 'MaxGCEPDVolumeCount'}
            ])

        if short_version == '3.3':
            predicates.extend([
                {'name': 'NoDiskConflict'},
                {'name': 'NoVolumeZoneConflict'},
                {'name': 'MaxEBSVolumeCount'},
                {'name': 'MaxGCEPDVolumeCount'},
                {'name': 'GeneralPredicates'},
                {'name': 'PodToleratesNodeTaints'},
                {'name': 'CheckNodeMemoryPressure'}
            ])

        if short_version == '3.4':
            predicates.extend([
                {'name': 'NoDiskConflict'},
                {'name': 'NoVolumeZoneConflict'},
                {'name': 'MaxEBSVolumeCount'},
                {'name': 'MaxGCEPDVolumeCount'},
                {'name': 'GeneralPredicates'},
                {'name': 'PodToleratesNodeTaints'},
                {'name': 'CheckNodeMemoryPressure'},
                {'name': 'CheckNodeDiskPressure'},
                {'name': 'MatchInterPodAffinity'}
            ])

        if short_version in ['3.5', '3.6']:
            predicates.extend([
                {'name': 'NoVolumeZoneConflict'},
                {'name': 'MaxEBSVolumeCount'},
                {'name': 'MaxGCEPDVolumeCount'},
                {'name': 'MatchInterPodAffinity'},
                {'name': 'NoDiskConflict'},
                {'name': 'GeneralPredicates'},
                {'name': 'PodToleratesNodeTaints'},
                {'name': 'CheckNodeMemoryPressure'},
                {'name': 'CheckNodeDiskPressure'},
            ])

        if short_version in ['3.7', '3.8']:
            predicates.extend([
                {'name': 'NoVolumeZoneConflict'},
                {'name': 'MaxEBSVolumeCount'},
                {'name': 'MaxGCEPDVolumeCount'},
                {'name': 'MaxAzureDiskVolumeCount'},
                {'name': 'MatchInterPodAffinity'},
                {'name': 'NoDiskConflict'},
                {'name': 'GeneralPredicates'},
                {'name': 'PodToleratesNodeTaints'},
                {'name': 'CheckNodeMemoryPressure'},
                {'name': 'CheckNodeDiskPressure'},
                {'name': 'NoVolumeNodeConflict'},
            ])

        if regions_enabled:
            region_predicate = {
                'name': 'Region',
                'argument': {
                    'serviceAffinity': {
                        'labels': ['region']
                    }
                }
            }
            predicates.append(region_predicate)

        return predicates
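
The chain of version checks is effectively a lookup table. A hedged sketch of the same idea expressed declaratively (data abbreviated to the first two versions):

# Hedged sketch: the if-chains above as a dict keyed by short_version;
# only 3.1 and 3.2 are reproduced here.
PREDICATES_BY_VERSION = {
    '3.1': ['PodFitsHostPorts', 'PodFitsResources', 'NoDiskConflict',
            'MatchNodeSelector'],
    '3.2': ['PodFitsHostPorts', 'PodFitsResources', 'NoDiskConflict',
            'NoVolumeZoneConflict', 'MatchNodeSelector',
            'MaxEBSVolumeCount', 'MaxGCEPDVolumeCount'],
}

def predicates_for(short_version):
    return [{'name': n} for n in PREDICATES_BY_VERSION.get(short_version, [])]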
Example No. 9
    def fail_json(self, *args, **kwargs):
        raise AnsibleError(kwargs['msg'])
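
Overriding fail_json like this turns a module's failure path into an exception a test can assert on. A small, hypothetical sketch of the stub in use:

# Hypothetical sketch: a stub module whose fail_json raises, so a test
# can catch the failure instead of the process exiting.
from ansible.errors import AnsibleError

class FakeModule(object):
    def fail_json(self, *args, **kwargs):
        raise AnsibleError(kwargs['msg'])

try:
    FakeModule().fail_json(msg="boom")
except AnsibleError as e:
    print("caught:", e)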
Example No. 10
    def run(self,
            terms,
            variables=None,
            zones_enabled=True,
            short_version=None,
            deployment_type=None,
            **kwargs):

        priorities = []

        if short_version is None or deployment_type is None:
            if 'openshift' not in variables:
                raise AnsibleError(
                    "This lookup module requires openshift_facts to be run prior to use"
                )

        if deployment_type is None:
            if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
                raise AnsibleError(
                    "This lookup module requires that the deployment_type be set"
                )

            deployment_type = variables['openshift']['common']['deployment_type']

        if short_version is None:
            if 'short_version' in variables['openshift']['common']:
                short_version = variables['openshift']['common']['short_version']
            elif 'openshift_release' in variables:
                release = variables['openshift_release']
                if release.startswith('v'):
                    short_version = release[1:]
                else:
                    short_version = release
                short_version = '.'.join(short_version.split('.')[0:2])
            elif 'openshift_version' in variables:
                version = variables['openshift_version']
                short_version = '.'.join(version.split('.')[0:2])
            else:
                # pylint: disable=line-too-long
                raise AnsibleError(
                    "Either OpenShift needs to be installed or openshift_release needs to be specified"
                )

        if deployment_type == 'origin':
            if short_version not in [
                    '1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8',
                    '3.9', 'latest'
            ]:
                raise AnsibleError("Unknown short_version %s" % short_version)
        elif deployment_type == 'openshift-enterprise':
            if short_version not in [
                    '3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8',
                    '3.9', 'latest'
            ]:
                raise AnsibleError("Unknown short_version %s" % short_version)
        else:
            raise AnsibleError("Unknown deployment_type %s" % deployment_type)

        if deployment_type == 'origin':
            # convert origin short_version to enterprise short_version
            short_version = re.sub(r'^1\.', '3.', short_version)

        if short_version == 'latest':
            short_version = '3.9'

        if short_version == '3.1':
            priorities.extend([{
                'name': 'LeastRequestedPriority',
                'weight': 1
            }, {
                'name': 'BalancedResourceAllocation',
                'weight': 1
            }, {
                'name': 'SelectorSpreadPriority',
                'weight': 1
            }])

        if short_version == '3.2':
            priorities.extend([{
                'name': 'LeastRequestedPriority',
                'weight': 1
            }, {
                'name': 'BalancedResourceAllocation',
                'weight': 1
            }, {
                'name': 'SelectorSpreadPriority',
                'weight': 1
            }, {
                'name': 'NodeAffinityPriority',
                'weight': 1
            }])

        if short_version == '3.3':
            priorities.extend([{
                'name': 'LeastRequestedPriority',
                'weight': 1
            }, {
                'name': 'BalancedResourceAllocation',
                'weight': 1
            }, {
                'name': 'SelectorSpreadPriority',
                'weight': 1
            }, {
                'name': 'NodeAffinityPriority',
                'weight': 1
            }, {
                'name': 'TaintTolerationPriority',
                'weight': 1
            }])

        if short_version == '3.4':
            priorities.extend([{
                'name': 'LeastRequestedPriority',
                'weight': 1
            }, {
                'name': 'BalancedResourceAllocation',
                'weight': 1
            }, {
                'name': 'SelectorSpreadPriority',
                'weight': 1
            }, {
                'name': 'NodePreferAvoidPodsPriority',
                'weight': 10000
            }, {
                'name': 'NodeAffinityPriority',
                'weight': 1
            }, {
                'name': 'TaintTolerationPriority',
                'weight': 1
            }, {
                'name': 'InterPodAffinityPriority',
                'weight': 1
            }])

        if short_version in ['3.5', '3.6', '3.7', '3.8', '3.9']:
            priorities.extend([{
                'name': 'SelectorSpreadPriority',
                'weight': 1
            }, {
                'name': 'InterPodAffinityPriority',
                'weight': 1
            }, {
                'name': 'LeastRequestedPriority',
                'weight': 1
            }, {
                'name': 'BalancedResourceAllocation',
                'weight': 1
            }, {
                'name': 'NodePreferAvoidPodsPriority',
                'weight': 10000
            }, {
                'name': 'NodeAffinityPriority',
                'weight': 1
            }, {
                'name': 'TaintTolerationPriority',
                'weight': 1
            }])

        if zones_enabled:
            zone_priority = {
                'name': 'Zone',
                'argument': {
                    'serviceAntiAffinity': {
                        'label': 'zone'
                    }
                },
                'weight': 2
            }
            priorities.append(zone_priority)

        return priorities
Example No. 11
def parse_cli(output, tmpl):
    if not isinstance(output, string_types):
        raise AnsibleError(
            "parse_cli input should be a string, but was given a input of %s" %
            (type(output)))

    if not os.path.exists(tmpl):
        raise AnsibleError('unable to locate parse_cli template: %s' % tmpl)

    try:
        template = Template()
    except ImportError as exc:
        raise AnsibleError(to_native(exc))

    with open(tmpl) as tmpl_fh:
        tmpl_content = tmpl_fh.read()

    spec = yaml.safe_load(tmpl_content)
    obj = {}

    for name, attrs in iteritems(spec['keys']):
        value = attrs['value']

        try:
            variables = spec.get('vars', {})
            value = template(value, variables)
        except Exception:
            pass

        if 'start_block' in attrs and 'end_block' in attrs:
            start_block = re.compile(attrs['start_block'])
            end_block = re.compile(attrs['end_block'])

            blocks = list()
            lines = None
            block_started = False

            for line in output.split('\n'):
                match_start = start_block.match(line)
                match_end = end_block.match(line)

                if match_start:
                    lines = list()
                    lines.append(line)
                    block_started = True

                elif match_end:
                    if lines:
                        lines.append(line)
                        blocks.append('\n'.join(lines))
                    block_started = False

                elif block_started:
                    if lines:
                        lines.append(line)

            regex_items = [re.compile(r) for r in attrs['items']]
            objects = list()

            for block in blocks:
                if isinstance(value, Mapping) and 'key' not in value:
                    items = list()
                    for regex in regex_items:
                        match = regex.search(block)
                        if match:
                            item_values = match.groupdict()
                            item_values['match'] = list(match.groups())
                            items.append(item_values)
                        else:
                            items.append(None)

                    obj = {}
                    for k, v in iteritems(value):
                        try:
                            obj[k] = template(v, {'item': items},
                                              fail_on_undefined=False)
                        except Exception:
                            obj[k] = None
                    objects.append(obj)

                elif isinstance(value, Mapping):
                    items = list()
                    for regex in regex_items:
                        match = regex.search(block)
                        if match:
                            item_values = match.groupdict()
                            item_values['match'] = list(match.groups())
                            items.append(item_values)
                        else:
                            items.append(None)

                    key = template(value['key'], {'item': items})
                    values = dict([(k, template(v, {'item': items}))
                                   for k, v in iteritems(value['values'])])
                    objects.append({key: values})

            return objects

        elif 'items' in attrs:
            regexp = re.compile(attrs['items'])
            when = attrs.get('when')
            conditional = "{%% if %s %%}True{%% else %%}False{%% endif %%}" % when

            if isinstance(value, Mapping) and 'key' not in value:
                values = list()

                for item in re_matchall(regexp, output):
                    entry = {}

                    for item_key, item_value in iteritems(value):
                        entry[item_key] = template(item_value, {'item': item})

                    if when:
                        if template(conditional, {'item': entry}):
                            values.append(entry)
                    else:
                        values.append(entry)

                obj[name] = values

            elif isinstance(value, Mapping):
                values = dict()

                for item in re_matchall(regexp, output):
                    entry = {}

                    for item_key, item_value in iteritems(value['values']):
                        entry[item_key] = template(item_value, {'item': item})

                    key = template(value['key'], {'item': item})

                    if when:
                        if template(conditional,
                                    {'item': {
                                        'key': key,
                                        'value': entry
                                    }}):
                            values[key] = entry
                    else:
                        values[key] = entry

                obj[name] = values

            else:
                item = re_search(regexp, output)
                obj[name] = template(value, {'item': item})

        else:
            obj[name] = value

    return obj
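
The start_block/end_block branch carves raw CLI output into per-section chunks before the item regexes run. A hedged, self-contained sketch of that scan, applied to a made-up interface listing:

# Hedged sketch of the start_block/end_block scan above.
import re

output = """interface Gi0/1
 description uplink
!
interface Gi0/2
 shutdown
!"""

start_block = re.compile(r'^interface')
end_block = re.compile(r'^!')

blocks, lines = [], None
for line in output.split('\n'):
    if start_block.match(line):
        lines = [line]
    elif end_block.match(line):
        if lines:
            blocks.append('\n'.join(lines))
        lines = None
    elif lines is not None:
        lines.append(line)

print(blocks)  # one chunk per interface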
Example No. 12
    def _connect(self):
        if not HAS_NCCLIENT:
            raise AnsibleError("%s: %s" % (missing_required_lib("ncclient"),
                                           to_native(NCCLIENT_IMP_ERR)))

        self.queue_message('log', 'ssh connection done, starting ncclient')

        allow_agent = True
        if self._play_context.password is not None:
            allow_agent = False
        setattr(self._play_context, 'allow_agent', allow_agent)

        self.key_filename = self._play_context.private_key_file or self.get_option(
            'private_key_file')
        if self.key_filename:
            self.key_filename = str(os.path.expanduser(self.key_filename))

        self._ssh_config = self.get_option('netconf_ssh_config')
        if self._ssh_config in BOOLEANS_TRUE:
            self._ssh_config = True
        elif self._ssh_config in BOOLEANS_FALSE:
            self._ssh_config = None

        # Try to guess the network_os if the network_os is set to auto
        if self._network_os == 'auto':
            for cls in netconf_loader.all(class_only=True):
                network_os = cls.guess_network_os(self)
                if network_os:
                    self.queue_message('vvv',
                                       'discovered network_os %s' % network_os)
                    self._network_os = network_os

        # If we tried to detect the network_os but could not (i.e. network_os
        # is still 'auto'), fall back to the default netconf plugin

        if self._network_os == 'auto':
            # Network os not discovered. Set it to default
            self.queue_message(
                'vvv',
                'Unable to discover network_os. Falling back to default.')
            self._network_os = 'default'

        device_params = {
            'name': NETWORK_OS_DEVICE_PARAM_MAP.get(self._network_os) or self._network_os
        }

        try:
            port = self._play_context.port or 830
            self.queue_message(
                'vvv',
                "ESTABLISH NETCONF SSH CONNECTION FOR USER: %s on PORT %s TO %s WITH SSH_CONFIG = %s"
                % (self._play_context.remote_user, port,
                   self._play_context.remote_addr, self._ssh_config))
            self._manager = manager.connect(
                host=self._play_context.remote_addr,
                port=port,
                username=self._play_context.remote_user,
                password=self._play_context.password,
                key_filename=self.key_filename,
                hostkey_verify=self.get_option('host_key_checking'),
                look_for_keys=self.get_option('look_for_keys'),
                device_params=device_params,
                allow_agent=self._play_context.allow_agent,
                timeout=self.get_option('persistent_connect_timeout'),
                ssh_config=self._ssh_config)

            self._manager._timeout = self.get_option(
                'persistent_command_timeout')
        except SSHUnknownHostError as exc:
            raise AnsibleConnectionFailure(to_native(exc))
        except ImportError as exc:
            raise AnsibleError(
                "connection=netconf is not supported on {0}".format(
                    self._network_os))

        if not self._manager.connected:
            return 1, b'', b'not connected'

        self.queue_message('log',
                           'ncclient manager object created successfully')

        self._connected = True

        super(Connection, self)._connect()

        return 0, to_bytes(self._manager.session_id,
                           errors='surrogate_or_strict'), b''
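
The ncclient call at the centre of this method can be exercised on its own. A hedged sketch (host and credentials are placeholders; a reachable NETCONF server on port 830 is assumed):

# Hedged sketch of the ncclient handshake performed above; connection
# details are placeholders.
from ncclient import manager

m = manager.connect(
    host='192.0.2.1',
    port=830,
    username='admin',
    password='secret',
    hostkey_verify=False,
    device_params={'name': 'default'},
)
print(m.connected, m.session_id)
m.close_session()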
Example No. 13
    def run(self, terms, variables=None, **kwargs):

        results = []

        # Version parameters
        version = terms[1]

        wantstate = kwargs.pop('wantstate', None)
        wantmap   = kwargs.pop('wantmap', False)

        #########
        # Sapis #
        #########

        itemDefault = {
            'state': 'present'
        }

        for term in self._flatten(terms[0]):

            items = []

            # Short syntax
            if isinstance(term, basestring):
                item = itemDefault.copy()
                item.update({
                    'sapi': term
                })
            else:

                # Must be a dict
                if not isinstance(term, dict):
                    raise AnsibleError('Expect a dict')

                # Check index key
                if 'sapi' not in term:
                    raise AnsibleError('Expect "sapi" key')

                item = itemDefault.copy()
                item.update(term)

            # Known sapi ?
            if item.get('sapi') not in version['sapis']:
                raise AnsibleError('Unknown sapi "' + item.get('sapi') + '"')

            items.append(item)

            # Merge by index key
            for item in items:
                itemFound = False
                for i, result in enumerate(results):
                    if result['sapi'] == item['sapi']:
                        results[i] = item
                        itemFound = True
                        break

                if not itemFound:
                    results.append(item)

        # Filter by state
        if wantstate is not None:
            results = [result for result in results if result.get('state') == wantstate]

        # Map
        if wantmap:
            results = [result.get('sapi') for result in results]

        return results
Example No. 14
    def run(self, terms, variables, **kwargs):

        res = []
        output = {}
        try:
            yang_file = terms[0]
        except IndexError:
            raise AnsibleError('the yang file must be specified')

        yang_file = os.path.realpath(os.path.expanduser(yang_file))
        if not os.path.isfile(yang_file):
            raise AnsibleError('%s invalid file path' % yang_file)

        search_path = kwargs.pop('search_path', '')
        annotations = kwargs.pop('annotations', '')

        for path in search_path.split(':'):
            path = os.path.realpath(os.path.expanduser(path))
            if path != '' and not os.path.isdir(path):
                raise AnsibleError('%s is an invalid directory path' % path)

        keep_tmp_files = kwargs.pop('keep_tmp_files', False)
        defaults = kwargs.pop('defaults', False)
        doctype = kwargs.pop('doctype', 'config')

        valid_doctype = ['config', 'data']
        if doctype not in valid_doctype:
            raise AnsibleError(
                'doctype value %s is invalid, valid values are %s' %
                (doctype, ', '.join(valid_doctype)))

        pyang_exec_path = find_file_in_path('pyang')

        saved_arg = deepcopy(sys.argv)
        sys.stdout = sys.stderr = StringIO()

        plugindir = unfrackpath(YANG_SPEC_DIR_PATH)
        makedirs_safe(plugindir)

        tree_file_path = os.path.join(YANG_SPEC_DIR_PATH,
                                      '%s.%s' % (str(uuid.uuid4()), 'txt'))
        xml_file_path = os.path.join(YANG_SPEC_DIR_PATH,
                                     '%s.%s' % (str(uuid.uuid4()), 'xml'))
        json_file_path = os.path.join(YANG_SPEC_DIR_PATH,
                                      '%s.%s' % (str(uuid.uuid4()), 'json'))
        tree_file_path = os.path.realpath(os.path.expanduser(tree_file_path))
        xml_file_path = os.path.realpath(os.path.expanduser(xml_file_path))
        json_file_path = os.path.realpath(os.path.expanduser(json_file_path))

        # fill in the sys args before invoking pyang to retrieve xml skeleton
        sample_xml_skeleton_cmd = [
            pyang_exec_path, '-f', 'sample-xml-skeleton', '-o', xml_file_path,
            yang_file, '-p', search_path, "--sample-xml-skeleton-doctype",
            doctype, "--lax-quote-checks"
        ]

        if defaults:
            sample_xml_skeleton_cmd.append("--sample-xml-skeleton-defaults")

        if annotations:
            sample_xml_skeleton_cmd.append("--sample-xml-skeleton-annotations")

        try:
            subprocess.check_output(' '.join(sample_xml_skeleton_cmd),
                                    stderr=subprocess.STDOUT,
                                    shell=True)
        except SystemExit:
            pass
        except Exception as e:
            if not keep_tmp_files:
                shutil.rmtree(os.path.realpath(
                    os.path.expanduser(YANG_SPEC_DIR_PATH)),
                              ignore_errors=True)
            raise AnsibleError('Error while generating skeleton xml file: %s' %
                               e)
        finally:
            err = sys.stdout.getvalue()
            if err and 'error' in err.lower():
                if not keep_tmp_files:
                    shutil.rmtree(os.path.realpath(
                        os.path.expanduser(YANG_SPEC_DIR_PATH)),
                                  ignore_errors=True)
                raise AnsibleError(
                    'Error while generating skeleton xml file: %s' % err)

        sys.stdout.flush()
        sys.stderr.flush()

        # fill in the sys args before invoking pyang to retrieve tree structure
        tree_cmd = [
            pyang_exec_path, '-f', 'tree', '-o', tree_file_path, yang_file,
            '-p', search_path, "--lax-quote-checks"
        ]

        try:
            subprocess.check_output(' '.join(tree_cmd),
                                    stderr=subprocess.STDOUT,
                                    shell=True)
        except SystemExit:
            pass
        except Exception as e:
            if not keep_tmp_files:
                shutil.rmtree(os.path.realpath(
                    os.path.expanduser(YANG_SPEC_DIR_PATH)),
                              ignore_errors=True)
            raise AnsibleError('Error while generating tree file: %s' % e)
        finally:
            err = sys.stdout.getvalue()
            if err and 'error' in err.lower():
                if not keep_tmp_files:
                    shutil.rmtree(os.path.realpath(
                        os.path.expanduser(YANG_SPEC_DIR_PATH)),
                                  ignore_errors=True)
                raise AnsibleError('Error while generating tree file: %s' %
                                   err)

        sys.stdout.flush()
        sys.stderr.flush()

        plugin_file_src = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'yang_spec.py')
        shutil.copy(plugin_file_src, plugindir)

        # fill in the sys args before invoking pyang to retrieve json skeleton
        sample_json_skeleton_cmd = [
            pyang_exec_path, '--plugindir', plugindir, '-f',
            'sample-json-skeleton', '-o', json_file_path, yang_file, '-p',
            search_path, '--lax-quote-checks',
            '--sample-json-skeleton-doctype', doctype
        ]

        if defaults:
            sample_json_skeleton_cmd.append("--sample-json-skeleton-defaults")

        try:
            subprocess.check_output(' '.join(sample_json_skeleton_cmd),
                                    stderr=subprocess.STDOUT,
                                    shell=True)
        except SystemExit:
            pass
        except Exception as e:
            if not keep_tmp_files:
                shutil.rmtree(os.path.realpath(
                    os.path.expanduser(YANG_SPEC_DIR_PATH)),
                              ignore_errors=True)
            raise AnsibleError(
                'Error while generating skeleton json file: %s' % e)
        finally:
            err = sys.stdout.getvalue()
            if err and 'error' in err.lower():
                if not keep_tmp_files:
                    shutil.rmtree(os.path.realpath(
                        os.path.expanduser(YANG_SPEC_DIR_PATH)),
                                  ignore_errors=True)
                raise AnsibleError(
                    'Error while generating skeleton json file: %s' % err)

        with open(tree_file_path, 'r') as f:
            output['tree'] = f.read()

        with open(xml_file_path, 'r') as f:
            output['xml_skeleton'] = f.read()

        with open(json_file_path, 'r') as f:
            output['json_skeleton'] = json.load(f)

        if not keep_tmp_files:
            shutil.rmtree(plugindir, ignore_errors=True)
        res.append(output)
        return res
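
The pyang invocations above repeat a run-and-clean-up pattern. A minimal sketch of that pattern follows; the names run_or_cleanup and scratch_dir are illustrative, not part of the plugin itself:

import os
import shutil
import subprocess


def run_or_cleanup(cmd, scratch_dir, keep_tmp_files=False):
    # run the external command, capturing stderr along with stdout
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except Exception as e:
        # on any failure, remove the scratch directory unless asked to keep it
        if not keep_tmp_files:
            shutil.rmtree(os.path.realpath(os.path.expanduser(scratch_dir)),
                          ignore_errors=True)
        raise RuntimeError('command %s failed: %s' % (cmd, e))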
Example #15
from ansible.utils.path import unfrackpath, makedirs_safe
from ansible.errors import AnsibleError

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

try:
    from pyang import plugin, error
    from pyang import statements
    from pyang.util import unique_prefixes
    from pyang.types import Decimal64Value
except ImportError:
    raise AnsibleError("pyang is not installed")

YANG_SPEC_DIR_PATH = "~/.ansible/tmp/yang_spec"


class LookupModule(LookupBase):
    def run(self, terms, variables, **kwargs):

        res = []
        output = {}
        try:
            yang_file = terms[0]
        except IndexError:
            raise AnsibleError('the yang file must be specified')

        yang_file = os.path.realpath(os.path.expanduser(yang_file))
Example #16
    def preprocess_data(self, ds):
        '''
        Tasks are especially complex arguments, so they need pre-processing.
        Keep it short.
        '''

        if not isinstance(ds, dict):
            raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure suitable for the
        # attributes of the task class
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # use the args parsing class to determine the action, args,
        # and the delegate_to value from the various possible forms
        # supported as legacy
        args_parser = ModuleArgsParser(task_ds=ds)
        try:
            (action, args, delegate_to) = args_parser.parse()
        except AnsibleParserError as e:
            # if the raised exception was created with obj=ds args, then it includes the detail
            # already, so we don't need to add it and can just re-raise
            if e._obj:
                raise
            # But if it wasn't, we can add the yaml object now to get more detail
            raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)

        # the command/shell/script modules used to support the `cmd` arg,
        # which corresponds to what we now call _raw_params, so move that
        # value over to _raw_params (assuming it is empty)
        if action in ('command', 'shell', 'script'):
            if 'cmd' in args:
                if args.get('_raw_params', '') != '':
                    raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
                                       " Please put everything in one or the other place.", obj=ds)
                args['_raw_params'] = args.pop('cmd')

        new_ds['action'] = action
        new_ds['args'] = args
        new_ds['delegate_to'] = delegate_to

        # we handle any 'vars' specified in the ds here, as we may
        # be adding things to them below (special handling for includes).
        # When that deprecated feature is removed, this can be too.
        if 'vars' in ds:
            # _load_vars is defined in Base, and is used to load a dictionary
            # or list of dictionaries in a standard way
            new_ds['vars'] = self._load_vars(None, ds.get('vars'))
        else:
            new_ds['vars'] = dict()

        for (k, v) in iteritems(ds):
            if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
                # we don't want to re-assign these values, which were determined by the ModuleArgsParser() above
                continue
            elif k.startswith('with_') and k.replace("with_", "") in lookup_loader:
                # transform into loop property
                self._preprocess_with_loop(ds, new_ds, k, v)
            else:
                # pre-2.0 syntax allowed variables for include statements at the top level of the task,
                # so we move those into the 'vars' dictionary here, and show a deprecation message
                # as we will remove this at some point in the future.
                if action in ('include', 'include_tasks') and k not in self._valid_attrs and k not in self.DEPRECATED_ATTRIBUTES:
                    display.deprecated("Specifying include variables at the top-level of the task is deprecated."
                                       " Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\n"
                                       " for currently supported syntax regarding included files and variables", version="2.7")
                    new_ds['vars'][k] = v
                elif k in self._valid_attrs:
                    new_ds[k] = v
                else:
                    display.warning("Ignoring invalid attribute: %s" % k)

        return super(Task, self).preprocess_data(new_ds)
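
The `cmd` handling above is easy to miss; here is a dict-only sketch of that normalization (plain dicts stand in for the parsed task datastructure, so this is a simplification, not ModuleArgsParser itself):

def move_cmd_to_raw_params(action, args):
    # command/shell/script used to accept `cmd`; fold it into _raw_params
    if action in ('command', 'shell', 'script') and 'cmd' in args:
        if args.get('_raw_params', '') != '':
            raise ValueError("'cmd' cannot be combined with other raw parameters")
        args['_raw_params'] = args.pop('cmd')
    return args


assert move_cmd_to_raw_params('command', {'cmd': 'echo hi'}) == {'_raw_params': 'echo hi'}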
Example #17
def process_plugins(module_map, templates, outputname, output_dir,
                    ansible_version, plugin_type):
    for module in module_map:

        display.display("rendering: %s" % module)
        fname = module_map[module]['path']
        display.vvvvv(
            pp.pformat(('process_plugins info: ', module_map[module])))

        # report an error and substitute stub documentation if the module is missing
        # documentation and is not explicitly hidden from the docs index
        if module_map[module]['doc'] is None:
            display.error("%s MISSING DOCUMENTATION" % (fname, ))
            _doc = {
                plugin_type: module,
                'version_added': '2.4',
                'filename': fname
            }
            module_map[module]['doc'] = _doc
            # continue

        # Going to reference this heavily so make a short name to reference it by
        doc = module_map[module]['doc']
        display.vvvvv(pp.pformat(('process_plugins doc: ', doc)))

        # add some defaults for plugins that don't have most of the info
        doc['module'] = doc.get('module', module)
        doc['version_added'] = doc.get('version_added', 'historical')

        doc['plugin_type'] = plugin_type

        if module_map[module]['deprecated'] and 'deprecated' not in doc:
            display.warning("%s PLUGIN MISSING DEPRECATION DOCUMENTATION: %s" %
                            (fname, 'deprecated'))

        required_fields = ('short_description', )
        for field in required_fields:
            if field not in doc:
                display.warning("%s PLUGIN MISSING field '%s'" %
                                (fname, field))

        not_nullable_fields = ('short_description', )
        for field in not_nullable_fields:
            if field in doc and doc[field] in (None, ''):
                print(
                    "%s: WARNING: MODULE field '%s' DOCUMENTATION is null/empty value=%s"
                    % (fname, field, doc[field]))

        if 'version_added' not in doc:
            display.error("*** ERROR: missing version_added in: %s ***\n" %
                          module)

        #
        # The present template gets everything from doc so we spend most of this
        # function moving data into doc for the template to reference
        #

        if module_map[module]['aliases']:
            doc['aliases'] = module_map[module]['aliases']

        # don't show version added information if it's too old to be called out
        added = 0
        if doc['version_added'] == 'historical':
            del doc['version_added']
        else:
            added = doc['version_added']

        # Strip old version_added for the module
        if too_old(added):
            del doc['version_added']

        option_names = []

        if 'options' in doc and doc['options']:
            for (k, v) in iteritems(doc['options']):
                # Error out if there's no description
                if 'description' not in doc['options'][k]:
                    raise AnsibleError(
                        "Missing required description for option %s in %s " %
                        (k, module))

                # Error out if required isn't a boolean (people have been putting
                # information on when something is required in here.  Those need
                # to go in the description instead).
                required_value = doc['options'][k].get('required', False)
                if not isinstance(required_value, bool):
                    raise AnsibleError(
                        "Invalid required value '%s' for option '%s' in '%s' (must be a boolean)"
                        % (required_value, k, module))

                # Strip old version_added information for options
                if 'version_added' in doc['options'][k] and too_old(
                        doc['options'][k]['version_added']):
                    del doc['options'][k]['version_added']

                # Make sure description is a list of lines for later formatting
                if not isinstance(doc['options'][k]['description'], list):
                    doc['options'][k]['description'] = [
                        doc['options'][k]['description']
                    ]

                option_names.append(k)

        option_names.sort()

        doc['option_keys'] = option_names
        doc['filename'] = fname
        doc['source'] = module_map[module]['source']
        doc['docuri'] = doc['module'].replace('_', '-')
        doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
        doc['ansible_version'] = ansible_version

        # check the 'deprecated' field in doc. We expect a dict potentially with 'why', 'version', and 'alternative' fields
        # examples = module_map[module]['examples']
        # print('\n\n%s: type of examples: %s\n' % (module, type(examples)))
        # if examples and not isinstance(examples, (str, unicode, list)):
        #    raise TypeError('module %s examples is wrong type (%s): %s' % (module, type(examples), examples))

        # use 'examples' for 'plainexamples' if 'examples' is a string
        if isinstance(module_map[module]['examples'], string_types):
            doc['plainexamples'] = module_map[module]['examples']  # plain text
        else:
            doc['plainexamples'] = ''

        doc['metadata'] = module_map[module]['metadata']

        display.vvvvv(pp.pformat(module_map[module]))
        if module_map[module]['returndocs']:
            try:
                doc['returndocs'] = yaml.safe_load(
                    module_map[module]['returndocs'])
            except Exception as e:
                print("%s:%s:yaml error:%s:returndocs=%s" %
                      (fname, module, e, module_map[module]['returndocs']))
                doc['returndocs'] = None
        else:
            doc['returndocs'] = None

        doc['author'] = doc.get('author', ['UNKNOWN'])
        if isinstance(doc['author'], string_types):
            doc['author'] = [doc['author']]

        display.v('about to template %s' % module)
        display.vvvvv(pp.pformat(doc))
        text = templates['plugin'].render(doc)
        if LooseVersion(jinja2.__version__) < LooseVersion('2.10'):
            # jinja2 < 2.10's indent filter indents blank lines.  Cleanup
            text = re.sub(' +\n', '\n', text)

        write_data(text, output_dir, outputname, module)
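
too_old() is defined elsewhere in this tooling; a plausible sketch, assuming a LooseVersion comparison against a documentation cutoff (the cutoff value below is an assumption, not taken from the source):

from distutils.version import LooseVersion

ASSUMED_CUTOFF = '2.3'  # hypothetical threshold, not from the source


def too_old(added):
    # treat missing or unparsable values (e.g. 'historical') as not-too-old
    if not added:
        return False
    try:
        return LooseVersion(str(added)) < LooseVersion(ASSUMED_CUTOFF)
    except (TypeError, ValueError):
        return False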
Example #18
    def parse(self, inventory, loader, path, cache=None):

        super(InventoryModule, self).parse(inventory, loader, path)

        if cache is None:
            cache = self.get_option('cache')

        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        cmd = [path, "--list"]

        try:
            cache_key = self._get_cache_prefix(path)
            if not cache or cache_key not in self._cache:
                try:
                    sp = subprocess.Popen(cmd,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
                except OSError as e:
                    raise AnsibleParserError("problem running %s (%s)" %
                                             (' '.join(cmd), to_native(e)))
                (stdout, stderr) = sp.communicate()

                path = to_native(path)
                err = to_native(stderr or "")

                if err and not err.endswith('\n'):
                    err += '\n'

                if sp.returncode != 0:
                    raise AnsibleError(
                        "Inventory script (%s) had an execution error: %s " %
                        (path, err))

                # make sure script output is unicode so that json loader will output unicode strings itself
                try:
                    data = to_text(stdout, errors="strict")
                except Exception as e:
                    raise AnsibleError(
                        "Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}"
                        .format(path, to_native(e)))

                try:
                    self._cache[cache_key] = self.loader.load(data,
                                                              file_name=path)
                except Exception as e:
                    raise AnsibleError(
                        "failed to parse executable inventory script results from {0}: {1}\n{2}"
                        .format(path, to_native(e), err))

                # if no other errors happened and you want to force displaying stderr, do so now
                if err and self.get_option('always_show_stderr'):
                    self.display.error(msg=to_text(err))

            processed = self._cache[cache_key]
            if not isinstance(processed, Mapping):
                raise AnsibleError(
                    "failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}"
                    .format(path, err))

            group = None
            data_from_meta = None

            # A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each # host.
            # This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.
            for (group, gdata) in processed.items():
                if group == '_meta':
                    if 'hostvars' in gdata:
                        data_from_meta = gdata['hostvars']
                else:
                    self._parse_group(group, gdata)

            for host in self._hosts:
                got = {}
                if data_from_meta is None:
                    got = self.get_host_variables(path, host)
                else:
                    try:
                        got = data_from_meta.get(host, {})
                    except AttributeError as e:
                        raise AnsibleError(
                            "Improperly formatted host information for %s: %s"
                            % (host, to_native(e)))

                self._populate_host_vars([host], got)

        except Exception as e:
            raise AnsibleParserError(to_native(e))
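
For reference, a minimal --list payload a dynamic inventory script might emit, matching the shape the parser above walks: group entries plus an optional _meta.hostvars mapping that lets Ansible skip a per-host --host call. Host and variable names here are made up for illustration:

import json

example_inventory = {
    'web': {
        'hosts': ['web1.example.com'],
        'vars': {'http_port': 80},
    },
    '_meta': {
        'hostvars': {
            'web1.example.com': {'ansible_host': '192.0.2.10'},
        },
    },
}
print(json.dumps(example_inventory))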
Example #19
    def parse_source(self, source, cache=False):
        ''' Generate or update inventory for the source provided '''

        parsed = False
        display.debug(u'Examining possible inventory source: %s' % source)

        # use binary for path functions
        b_source = to_bytes(source)

        # process directories as a collection of inventories
        if os.path.isdir(b_source):
            display.debug(u'Searching for inventory files in directory: %s' %
                          source)
            for i in sorted(os.listdir(b_source)):

                display.debug(u'Considering %s' % i)
                # Skip hidden files and stuff we explicitly ignore
                if IGNORED.search(i):
                    continue

                # recursively deal with directory entries
                fullpath = to_text(os.path.join(b_source, i),
                                   errors='surrogate_or_strict')
                parsed_this_one = self.parse_source(fullpath, cache=cache)
                display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
                if not parsed:
                    parsed = parsed_this_one
        else:
            # left with strings or files, let plugins figure it out

            # set this so new hosts can use it for the inventory_file/dir vars
            self._inventory.current_source = source

            # get inventory plugins if needed, there should always be at least one generator
            if not self._inventory_plugins:
                self._setup_inventory_plugins()

            # try source with each plugin
            failures = []
            for plugin in self._inventory_plugins:
                plugin_name = to_text(
                    getattr(plugin, '_load_name',
                            getattr(plugin, '_original_path', '')))
                display.debug(u'Attempting to use plugin %s (%s)' %
                              (plugin_name, plugin._original_path))

                # initialize and figure out if plugin wants to attempt parsing this file
                try:
                    plugin_wants = bool(plugin.verify_file(source))
                except Exception:
                    plugin_wants = False

                if plugin_wants:
                    try:
                        # FIXME in case plugin fails 1/2 way we have partial inventory
                        plugin.parse(self._inventory,
                                     self._loader,
                                     source,
                                     cache=cache)
                        parsed = True
                        display.vvv(
                            'Parsed %s inventory source with %s plugin' %
                            (source, plugin_name))
                        break
                    except AnsibleParserError as e:
                        display.debug('%s was not parsable by %s' %
                                      (source, plugin_name))
                        failures.append({
                            'src': source,
                            'plugin': plugin_name,
                            'exc': e
                        })
                    except Exception as e:
                        display.debug('%s failed to parse %s' %
                                      (plugin_name, source))
                        failures.append({
                            'src': source,
                            'plugin': plugin_name,
                            'exc': AnsibleError(e)
                        })
                else:
                    display.vvv(
                        "%s declined parsing %s as it did not pass it's verify_file() method"
                        % (plugin_name, source))
            else:
                if not parsed and failures:
                    # only if no plugin processed files should we show errors.
                    for fail in failures:
                        display.warning(
                            u'\n* Failed to parse %s with %s plugin: %s' %
                            (to_text(fail['src']), fail['plugin'],
                             to_text(fail['exc'])))
                        if hasattr(fail['exc'], 'tb'):
                            display.vvv(to_text(fail['exc'].tb))
                    if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
                        raise AnsibleError(
                            u'Completely failed to parse inventory source %s' %
                            (source))
        if not parsed:
            if source != '/etc/ansible/hosts' or os.path.exists(source):
                # only warn if NOT using the default and if using it, only if the file is present
                display.warning("Unable to parse %s as an inventory source" %
                                source)

        # clean up, just in case
        self._inventory.current_source = None

        return parsed
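
A condensed sketch of the plugin-selection loop above: ask each candidate whether it wants the source, let the first willing plugin parse it, and collect failures so they can be reported only if nobody succeeded. The plugin objects are assumed to expose verify_file() and parse() as in the code above:

def parse_with_first_willing(source, plugins):
    failures = []
    for plugin in plugins:
        # a plugin that blows up in verify_file() simply declines the source
        try:
            wants = bool(plugin.verify_file(source))
        except Exception:
            wants = False
        if not wants:
            continue
        try:
            plugin.parse(source)
            return True, failures
        except Exception as e:
            failures.append((plugin, e))
    return False, failures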
Example #20
    def install(self):
        # the file is a tar, so open it that way and extract it
        # to the specified (or default) roles directory
        local_file = False

        if self.scm:
            # create tar file from scm url
            tmp_file = RoleRequirement.scm_archive_role(**self.spec)
        elif self.src:
            if os.path.isfile(self.src):
                # installing a local tar.gz
                local_file = True
                tmp_file = self.src
            elif '://' in self.src:
                role_data = self.src
                tmp_file = self.fetch(role_data)
            else:
                api = GalaxyAPI(self.galaxy)
                role_data = api.lookup_role_by_name(self.src)
                if not role_data:
                    raise AnsibleError("- sorry, %s was not found on %s." %
                                       (self.src, api.api_server))

                if role_data.get('role_type') == 'CON' and not os.environ.get(
                        'ANSIBLE_CONTAINER'):
                    # Container Enabled, running outside of a container
                    display.warning(
                        "%s is a Container Enabled role and should only be installed using "
                        "Ansible Container" % self.name)

                if role_data.get('role_type') == 'APP':
                    # Container Role
                    display.warning(
                        "%s is a Container App role and should only be installed using Ansible "
                        "Container" % self.name)

                role_versions = api.fetch_role_related('versions',
                                                       role_data['id'])
                if not self.version:
                    # convert the version names to LooseVersion objects
                    # and sort them to get the latest version. If there
                    # are no versions in the list, we'll grab the head
                    # of the master branch
                    if len(role_versions) > 0:
                        loose_versions = [
                            LooseVersion(a.get('name', None))
                            for a in role_versions
                        ]
                        loose_versions.sort()
                        self.version = str(loose_versions[-1])
                    elif role_data.get('github_branch', None):
                        self.version = role_data['github_branch']
                    else:
                        self.version = 'master'
                elif self.version != 'master':
                    if role_versions and str(self.version) not in [
                            a.get('name', None) for a in role_versions
                    ]:
                        raise AnsibleError(
                            "- the specified version (%s) of %s was not found in the list of available versions (%s)."
                            % (self.version, self.name, role_versions))

                tmp_file = self.fetch(role_data)

        else:
            raise AnsibleError("No valid role data found")

        if tmp_file:

            display.debug("installing from %s" % tmp_file)

            if not tarfile.is_tarfile(tmp_file):
                raise AnsibleError("the file downloaded was not a tar.gz")
            else:
                if tmp_file.endswith('.gz'):
                    role_tar_file = tarfile.open(tmp_file, "r:gz")
                else:
                    role_tar_file = tarfile.open(tmp_file, "r")
                # verify the role's meta file
                meta_file = None
                members = role_tar_file.getmembers()
                # next find the metadata file
                for member in members:
                    if self.META_MAIN in member.name:
                        # Look for parent of meta/main.yml
                        # Due to possibility of sub roles each containing meta/main.yml
                        # look for shortest length parent
                        meta_parent_dir = os.path.dirname(
                            os.path.dirname(member.name))
                        if not meta_file:
                            archive_parent_dir = meta_parent_dir
                            meta_file = member
                        else:
                            if len(meta_parent_dir) < len(archive_parent_dir):
                                archive_parent_dir = meta_parent_dir
                                meta_file = member
                if not meta_file:
                    raise AnsibleError(
                        "this role does not appear to have a meta/main.yml file."
                    )
                else:
                    try:
                        self._metadata = yaml.safe_load(
                            role_tar_file.extractfile(meta_file))
                    except Exception:
                        raise AnsibleError(
                            "this role does not appear to have a valid meta/main.yml file."
                        )

                # we strip off any higher-level directories for all of the files contained within
                # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the
                # other hand, do not have a parent directory at all.
                installed = False
                while not installed:
                    display.display("- extracting %s to %s" %
                                    (self.name, self.path))
                    try:
                        if os.path.exists(self.path):
                            if not os.path.isdir(self.path):
                                raise AnsibleError(
                                    "the specified roles path exists and is not a directory."
                                )
                            elif not getattr(self.options, "force", False):
                                raise AnsibleError(
                                    "the specified role %s appears to already exist. Use --force to replace it."
                                    % self.name)
                            else:
                                # using --force, remove the old path
                                if not self.remove():
                                    raise AnsibleError(
                                        "%s doesn't appear to contain a role.\n  please remove this directory manually if you really want to put the role here."
                                        % self.path)
                        else:
                            os.makedirs(self.path)

                        # now we do the actual extraction to the path
                        for member in members:
                            # we only extract files, and remove any relative path
                            # bits that might be in the file for security purposes
                            # and drop any containing directory, as mentioned above
                            if member.isreg() or member.issym():
                                parts = member.name.replace(
                                    archive_parent_dir, "").split(os.sep)
                                final_parts = []
                                for part in parts:
                                    if part != '..' and '~' not in part and '$' not in part:
                                        final_parts.append(part)
                                member.name = os.path.join(*final_parts)
                                role_tar_file.extract(member, self.path)

                        # write out the install info file for later use
                        self._write_galaxy_install_info()
                        installed = True
                    except OSError as e:
                        error = True
                        # OSError is not subscriptable on Python 3, so check errno instead
                        if e.errno == 13 and len(self.paths) > 1:  # 13 == EACCES
                            current = self.paths.index(self.path)
                            nextidx = current + 1
                            if nextidx < len(self.paths):
                                self.path = self.paths[nextidx]
                                error = False
                        if error:
                            raise AnsibleError(
                                "Could not update files in %s: %s" %
                                (self.path, str(e)))

                # report success and clean up the downloaded archive
                display.display("- %s was installed successfully" % str(self))
                if not local_file:
                    try:
                        os.unlink(tmp_file)
                    except (OSError, IOError) as e:
                        display.warning("Unable to remove tmp file (%s): %s" %
                                        (tmp_file, str(e)))
                return True

        return False
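
A standalone sketch of the member-name sanitisation applied during extraction above: strip the archive's top-level directory, then drop empty, '..', '~', and '$' path components before rejoining (POSIX separators are assumed in the final assert):

import os


def sanitize_member_name(name, archive_parent_dir):
    parts = name.replace(archive_parent_dir, '').split(os.sep)
    # keep only components that cannot escape or expand outside the role path
    safe = [p for p in parts
            if p and p != '..' and '~' not in p and '$' not in p]
    return os.path.join(*safe) if safe else ''


assert sanitize_member_name('repo-1.0/tasks/main.yml', 'repo-1.0') == 'tasks/main.yml'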
Example #21
    def set_options(self, task_keys=None, var_options=None, direct=None):
        if not HAS_WINRM:
            return

        super(Connection, self).set_options(task_keys=task_keys,
                                            var_options=var_options,
                                            direct=direct)

        self._winrm_host = self.get_option('remote_addr')
        self._winrm_user = self.get_option('remote_user')
        self._winrm_pass = self._play_context.password

        self._become_method = self._play_context.become_method
        self._become_user = self._play_context.become_user
        self._become_pass = self._play_context.become_pass

        self._winrm_port = self.get_option('port')

        self._winrm_scheme = self.get_option('scheme')
        # old behaviour: default the scheme to http when the port is 5985,
        # otherwise https
        if self._winrm_scheme is None:
            self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https'

        self._winrm_path = self.get_option('path')
        self._kinit_cmd = self.get_option('kerberos_command')
        self._winrm_transport = self.get_option('transport')
        self._winrm_connection_timeout = self.get_option('connection_timeout')

        if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
            self._winrm_supported_authtypes = set(
                winrm.FEATURE_SUPPORTED_AUTHTYPES)
        else:
            # for legacy versions of pywinrm, use the values we know are supported
            self._winrm_supported_authtypes = set(
                ['plaintext', 'ssl', 'kerberos'])

        # calculate transport if needed
        if self._winrm_transport is None or self._winrm_transport[0] is None:
            # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
            transport_selector = [
                'ssl'
            ] if self._winrm_scheme == 'https' else ['plaintext']

            if HAVE_KERBEROS and ((self._winrm_user
                                   and '@' in self._winrm_user)):
                self._winrm_transport = ['kerberos'] + transport_selector
            else:
                self._winrm_transport = transport_selector

        unsupported_transports = set(self._winrm_transport).difference(
            self._winrm_supported_authtypes)

        if unsupported_transports:
            raise AnsibleError(
                'The installed version of WinRM does not support transport(s) %s'
                % to_native(list(unsupported_transports),
                            nonstring='simplerepr'))

        # if kerberos is among our transports and there's a password specified, we're managing the tickets
        kinit_mode = self.get_option('kerberos_mode')
        if kinit_mode is None:
            # HACK: ideally, remove multi-transport stuff
            self._kerb_managed = "kerberos" in self._winrm_transport and (
                self._winrm_pass is not None and self._winrm_pass != "")
        elif kinit_mode == "managed":
            self._kerb_managed = True
        elif kinit_mode == "manual":
            self._kerb_managed = False

        # arg names we're going to pass directly
        internal_kwarg_mask = set([
            'self', 'endpoint', 'transport', 'username', 'password', 'scheme',
            'path', 'kinit_mode', 'kinit_cmd'
        ])

        self._winrm_kwargs = dict(username=self._winrm_user,
                                  password=self._winrm_pass)
        argspec = inspect.getargspec(Protocol.__init__)
        supported_winrm_args = set(argspec.args)
        supported_winrm_args.update(internal_kwarg_mask)
        passed_winrm_args = set([
            v.replace('ansible_winrm_', '') for v in self.get_option('_extras')
        ])
        unsupported_args = passed_winrm_args.difference(supported_winrm_args)

        # warn for kwargs unsupported by the installed version of pywinrm
        for arg in unsupported_args:
            display.warning(
                "ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)"
                .format(arg))

        # pass through matching extras, excluding the list we want to treat specially
        for arg in passed_winrm_args.difference(
                internal_kwarg_mask).intersection(supported_winrm_args):
            self._winrm_kwargs[arg] = self.get_option('_extras')[
                'ansible_winrm_%s' % arg]
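
A generic sketch of the argspec-based kwarg filtering used above: introspect the target callable, warn about extras it cannot accept, and pass through the rest. getfullargspec is the Python 3 replacement for the deprecated getargspec, and connect() is a hypothetical stand-in:

import inspect


def filter_supported_kwargs(func, passed):
    supported = set(inspect.getfullargspec(func).args)
    for arg in sorted(set(passed) - supported):
        print('warning: %s is not supported by %s' % (arg, func.__name__))
    return {k: v for k, v in passed.items() if k in supported}


def connect(endpoint=None, username=None, password=None):  # hypothetical stand-in
    pass


assert filter_supported_kwargs(connect, {'username': 'u', 'bogus': 1}) == {'username': 'u'}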
Example #22
    def _process_pending_results(self,
                                 iterator,
                                 one_pass=False,
                                 max_passes=None):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        def get_original_host(host_name):
            # FIXME: this should not need x2 _inventory
            host_name = to_text(host_name)
            if host_name in self._inventory.hosts:
                return self._inventory.hosts[host_name]
            else:
                return self._inventory.get_host(host_name)

        def search_handler_blocks_by_name(handler_name, handler_blocks):
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    if handler_task.name:
                        handler_vars = self._variable_manager.get_vars(
                            play=iterator._play, task=handler_task)
                        templar = Templar(loader=self._loader,
                                          variables=handler_vars)
                        try:
                            # first we check with the full result of get_name(), which may
                            # include the role name (if the handler is from a role). If that
                            # is not found, we resort to the simple name field, which doesn't
                            # have anything extra added to it.
                            target_handler_name = templar.template(
                                handler_task.name)
                            if target_handler_name == handler_name:
                                return handler_task
                            else:
                                target_handler_name = templar.template(
                                    handler_task.get_name())
                                if target_handler_name == handler_name:
                                    return handler_task
                        except (UndefinedError, AnsibleUndefinedVariable):
                            # We skip this handler due to the fact that it may be using
                            # a variable in the name that was conditionally included via
                            # set_fact or some other method, and we don't want to error
                            # out unnecessarily
                            continue
            return None

        def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
            for handler_block in handler_blocks:
                for handler_task in handler_block.block:
                    if handler_uuid == handler_task._uuid:
                        return handler_task
            return None

        def parent_handler_match(target_handler, handler_name):
            if target_handler:
                if isinstance(target_handler, (TaskInclude, IncludeRole)):
                    try:
                        handler_vars = self._variable_manager.get_vars(
                            play=iterator._play, task=target_handler)
                        templar = Templar(loader=self._loader,
                                          variables=handler_vars)
                        target_handler_name = templar.template(
                            target_handler.name)
                        if target_handler_name == handler_name:
                            return True
                        else:
                            target_handler_name = templar.template(
                                target_handler.get_name())
                            if target_handler_name == handler_name:
                                return True
                    except (UndefinedError, AnsibleUndefinedVariable):
                        pass
                return parent_handler_match(target_handler._parent,
                                            handler_name)
            else:
                return False

        cur_pass = 0
        while True:
            try:
                self._results_lock.acquire()
                task_result = self._results.popleft()
            except IndexError:
                break
            finally:
                self._results_lock.release()

            # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
            original_host = get_original_host(task_result._host)
            found_task = iterator.get_original_task(original_host,
                                                    task_result._task)
            original_task = found_task.copy(exclude_parent=True,
                                            exclude_tasks=True)
            original_task._parent = found_task._parent
            original_task.from_attrs(task_result._task_fields)

            task_result._host = original_host
            task_result._task = original_task

            # get the correct loop var for use later
            if original_task.loop_control:
                loop_var = original_task.loop_control.loop_var or 'item'
            else:
                loop_var = 'item'

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed',
                                            task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped',
                                            task_result)
                else:
                    if 'diff' in task_result._result:
                        if self._diff:
                            self._tqm.send_callback('v2_on_file_diff',
                                                    task_result)
                    self._tqm.send_callback('v2_runner_item_on_ok',
                                            task_result)
                continue

            if original_task.register:
                host_list = self.get_task_hosts(iterator, original_host,
                                                original_task)

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(
                        target_host, {original_task.register: clean_copy})

            # determine the task's final status and update stats and callbacks accordingly
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                ignore_errors = original_task.ignore_errors
                if not ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if original_task.run_once:
                        # if we're using run_once, we have to fail every host here
                        for h in self._inventory.get_hosts(
                                iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                state, _ = iterator.get_next_task_for_host(
                                    h, peek=True)
                                iterator.mark_host_failed(h)
                                state, new_task = iterator.get_next_task_for_host(
                                    h, peek=True)
                    else:
                        iterator.mark_host_failed(original_host)

                    # increment the failed count for this host
                    self._tqm._stats.increment('failures', original_host.name)

                    # grab the current state and if we're iterating on the rescue portion
                    # of a block then we save the failed task in a special var for use
                    # within the rescue/always
                    state, _ = iterator.get_next_task_for_host(original_host,
                                                               peek=True)

                    if iterator.is_failed(
                            original_host
                    ) and state and state.run_state == iterator.ITERATING_COMPLETE:
                        self._tqm._failed_hosts[original_host.name] = True

                    if state and state.run_state == iterator.ITERATING_RESCUE:
                        self._variable_manager.set_nonpersistent_facts(
                            original_host,
                            dict(
                                ansible_failed_task=original_task.serialize(),
                                ansible_failed_result=task_result._result,
                            ),
                        )
                else:
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result[
                            'changed']:
                        self._tqm._stats.increment('changed',
                                                   original_host.name)
                self._tqm.send_callback('v2_runner_on_failed',
                                        task_result,
                                        ignore_errors=ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                iterator._play._removed_hosts.append(original_host.name)
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable',
                                        task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                found = False
                                # Find the handler using the above helper.  First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                target_handler = search_handler_blocks_by_name(
                                    handler_name, iterator._play.handlers)
                                if target_handler is not None:
                                    found = True
                                    if target_handler._uuid not in self._notified_handlers:
                                        self._notified_handlers[
                                            target_handler._uuid] = []
                                    if original_host not in self._notified_handlers[
                                            target_handler._uuid]:
                                        self._notified_handlers[
                                            target_handler._uuid].append(
                                                original_host)
                                        # FIXME: should this be a callback?
                                        display.vv("NOTIFIED HANDLER %s" %
                                                   (handler_name, ))
                                else:
                                    # There may be more than one handler with the notified name as the
                                    # parent, so we just keep track of whether or not we found one at all
                                    for target_handler_uuid in self._notified_handlers:
                                        target_handler = search_handler_blocks_by_uuid(
                                            target_handler_uuid,
                                            iterator._play.handlers)
                                        if target_handler and parent_handler_match(
                                                target_handler, handler_name):
                                            found = True
                                            if original_host not in self._notified_handlers[
                                                    target_handler._uuid]:
                                                self._notified_handlers[
                                                    target_handler.
                                                    _uuid].append(
                                                        original_host)
                                                display.vv(
                                                    "NOTIFIED HANDLER %s" %
                                                    (target_handler.get_name(),
                                                     ))

                                if handler_name in self._listening_handlers:
                                    for listening_handler_uuid in self._listening_handlers[
                                            handler_name]:
                                        listening_handler = search_handler_blocks_by_uuid(
                                            listening_handler_uuid,
                                            iterator._play.handlers)
                                        if listening_handler is not None:
                                            found = True
                                        else:
                                            continue
                                        if original_host not in self._notified_handlers[
                                                listening_handler._uuid]:
                                            self._notified_handlers[
                                                listening_handler.
                                                _uuid].append(original_host)
                                            display.vv(
                                                "NOTIFIED HANDLER %s" %
                                                (listening_handler.get_name(),
                                                 ))

                                # and if none were found, then we raise an error
                                if not found:
                                    msg = (
                                        "The requested handler '%s' was not found in either the main handlers list nor in the listening "
                                        "handlers list" % handler_name)
                                    if C.ERROR_ON_MISSING_HANDLER:
                                        raise AnsibleError(msg)
                                    else:
                                        display.warning(msg)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)

                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)

                    if 'ansible_facts' in result_item:

                        # if delegated fact and we are delegating facts, we need to change target host for them
                        if original_task.delegate_to is not None and original_task.delegate_facts:
                            host_list = self.get_delegated_hosts(
                                result_item, original_task)
                        else:
                            host_list = self.get_task_hosts(
                                iterator, original_host, original_task)

                        if original_task.action == 'include_vars':
                            for (var_name, var_value) in iteritems(
                                    result_item['ansible_facts']):
                                # find the host we're actually referring to here, which may
                                # be a host that is not really in inventory at all
                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(
                                        target_host, var_name, var_value)
                        else:
                            cacheable = result_item.pop(
                                'ansible_facts_cacheable', True)
                            for target_host in host_list:
                                if cacheable:
                                    self._variable_manager.set_host_facts(
                                        target_host,
                                        result_item['ansible_facts'].copy())

                                # If we are setting a fact, it should populate non_persistent_facts as well
                                self._variable_manager.set_nonpersistent_facts(
                                    target_host,
                                    result_item['ansible_facts'].copy())

                    if 'ansible_stats' in result_item and 'data' in result_item[
                            'ansible_stats'] and result_item['ansible_stats'][
                                'data']:

                        if 'per_host' not in result_item[
                                'ansible_stats'] or result_item[
                                    'ansible_stats']['per_host']:
                            host_list = self.get_task_hosts(
                                iterator, original_host, original_task)
                        else:
                            host_list = [None]

                        data = result_item['ansible_stats']['data']
                        aggregate = 'aggregate' in result_item[
                            'ansible_stats'] and result_item['ansible_stats'][
                                'aggregate']
                        for myhost in host_list:
                            for k in data.keys():
                                if aggregate:
                                    self._tqm._stats.update_custom_stats(
                                        k, data[k], myhost)
                                else:
                                    self._tqm._stats.set_custom_stats(
                                        k, data[k], myhost)

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                if not isinstance(original_task, TaskInclude):
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result[
                            'changed']:
                        self._tqm._stats.increment('changed',
                                                   original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran:  # TODO:  and original_task.action != 'include_role':?
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[
                        original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

            if one_pass or max_passes is not None and (cur_pass +
                                                       1) >= max_passes:
                break

            cur_pass += 1

        return ret_results
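
The drain loop above pulls results from a lock-guarded deque; a condensed, self-contained sketch of that consumer pattern (names are illustrative):

import threading
from collections import deque

results = deque()
results_lock = threading.Lock()


def drain(one_pass=False, max_passes=None):
    out, cur_pass = [], 0
    while True:
        # pop under the lock; an empty deque ends the pass
        with results_lock:
            try:
                item = results.popleft()
            except IndexError:
                break
        out.append(item)
        if one_pass or (max_passes is not None and cur_pass + 1 >= max_passes):
            break
        cur_pass += 1
    return out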
Example #23
    def _winrm_exec(self,
                    command,
                    args=(),
                    from_exec=False,
                    stdin_iterator=None):
        if not self.protocol:
            self.protocol = self._winrm_connect()
            self._connected = True
        if from_exec:
            display.vvvvv("WINRM EXEC %r %r" % (command, args),
                          host=self._winrm_host)
        else:
            display.vvvvvv("WINRM EXEC %r %r" % (command, args),
                           host=self._winrm_host)
        command_id = None
        try:
            stdin_push_failed = False
            command_id = self.protocol.run_command(
                self.shell_id,
                to_bytes(command),
                map(to_bytes, args),
                console_mode_stdin=(stdin_iterator is None))

            try:
                if stdin_iterator:
                    for (data, is_last) in stdin_iterator:
                        self._winrm_send_input(self.protocol,
                                               self.shell_id,
                                               command_id,
                                               data,
                                               eof=is_last)

            except Exception as ex:
                display.warning("FATAL ERROR DURING FILE TRANSFER: %s" %
                                to_text(ex))
                stdin_push_failed = True

            # NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
            # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
            resptuple = self.protocol.get_command_output(
                self.shell_id, command_id)
            # ensure stdout/stderr are text for py3
            # FUTURE: this should probably be done internally by pywinrm
            response = Response(
                tuple(
                    to_text(v) if isinstance(v, binary_type) else v
                    for v in resptuple))

            # TODO: check result from response and set stdin_push_failed if we have nonzero
            if from_exec:
                display.vvvvv('WINRM RESULT %r' % to_text(response),
                              host=self._winrm_host)
            else:
                display.vvvvvv('WINRM RESULT %r' % to_text(response),
                               host=self._winrm_host)

            display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out),
                           host=self._winrm_host)
            display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err),
                           host=self._winrm_host)

            if stdin_push_failed:
                stderr = to_bytes(response.std_err, encoding='utf-8')
                if self.is_clixml(stderr):
                    stderr = self.parse_clixml_stream(stderr)

                raise AnsibleError(
                    'winrm send_input failed;\nstdout: %s\nstderr: %s' %
                    (to_native(response.std_out), to_native(stderr)))

            return response
        finally:
            if command_id:
                self.protocol.cleanup_command(self.shell_id, command_id)
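
The stdin_iterator argument above is consumed as (data, is_last) tuples; a minimal sketch of a compatible generator (the chunk size is an arbitrary choice for illustration):

def chunked_stdin(data, chunk_size=4096):
    """Yield (chunk, is_last) tuples suitable for the stdin_iterator above."""
    for offset in range(0, len(data), chunk_size):
        chunk = data[offset:offset + chunk_size]
        yield chunk, offset + chunk_size >= len(data)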
Example #24
    def _execute_meta(self, task, play_context, iterator, target_host):

        # meta tasks store their args in the _raw_params field of args,
        # since they do not use k=v pairs, so get that
        meta_action = task.args.get('_raw_params')

        # FIXME(s):
        # * raise an error or show a warning when a conditional is used
        #   on a meta task that doesn't support them

        def _evaluate_conditional(h):
            all_vars = self._variable_manager.get_vars(play=iterator._play,
                                                       host=h,
                                                       task=task)
            templar = Templar(loader=self._loader, variables=all_vars)
            return task.evaluate_conditional(templar, all_vars)

        skipped = False
        msg = ''
        if meta_action == 'noop':
            # FIXME: issue a callback for the noop here?
            msg = "noop"
        elif meta_action == 'flush_handlers':
            self.run_handlers(iterator, play_context)
            msg = "ran handlers"
        elif meta_action == 'refresh_inventory':
            self._inventory.refresh_inventory()
            msg = "inventory successfully refreshed"
        elif meta_action == 'clear_facts':
            if _evaluate_conditional(target_host):
                for host in self._inventory.get_hosts(iterator._play.hosts):
                    hostname = host.get_name()
                    self._variable_manager.clear_facts(hostname)
                msg = "facts cleared"
            else:
                skipped = True
        elif meta_action == 'clear_host_errors':
            if _evaluate_conditional(target_host):
                for host in self._inventory.get_hosts(iterator._play.hosts):
                    self._tqm._failed_hosts.pop(host.name, False)
                    self._tqm._unreachable_hosts.pop(host.name, False)
                    iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
                msg = "cleared host errors"
            else:
                skipped = True
        elif meta_action == 'end_play':
            if _evaluate_conditional(target_host):
                for host in self._inventory.get_hosts(iterator._play.hosts):
                    if host.name not in self._tqm._unreachable_hosts:
                        iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
                msg = "ending play"
        elif meta_action == 'reset_connection':
            connection = connection_loader.get(play_context.connection,
                                               play_context, os.devnull)
            if connection:
                connection.reset()
                msg = 'reset connection'
            else:
                msg = 'no connection, nothing to reset'
        else:
            raise AnsibleError("invalid meta action requested: %s" %
                               meta_action,
                               obj=task._ds)

        result = {'msg': msg}
        if skipped:
            result['skipped'] = True
        else:
            result['changed'] = False

        display.vv("META: %s" % msg)

        return [TaskResult(target_host, task, result)]
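
Meta tasks carry their action as free-form text rather than k=v module arguments, which is why the method reads _raw_params; roughly, a task written as "- meta: clear_facts" surfaces its action like this (a sketch, not the parser's actual data structure):

task_args = {'_raw_params': 'clear_facts'}
meta_action = task_args.get('_raw_params')  # -> 'clear_facts'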
Example #25
    def fetch_file(self, in_path, out_path):
        super(Connection, self).fetch_file(in_path, out_path)
        in_path = self._shell._unquote(in_path)
        out_path = out_path.replace('\\', '/')
        display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path),
                    host=self._winrm_host)
        buffer_size = 2**19  # 0.5MB chunks
        makedirs_safe(os.path.dirname(out_path))
        out_file = None
        try:
            offset = 0
            while True:
                try:
                    script = '''
                        $path = "%(path)s"
                        If (Test-Path -Path $path -PathType Leaf)
                        {
                            $buffer_size = %(buffer_size)d
                            $offset = %(offset)d

                            $stream = New-Object -TypeName IO.FileStream($path, [IO.FileMode]::Open, [IO.FileAccess]::Read, [IO.FileShare]::ReadWrite)
                            $stream.Seek($offset, [System.IO.SeekOrigin]::Begin) > $null
                            $buffer = New-Object -TypeName byte[] $buffer_size
                            $bytes_read = $stream.Read($buffer, 0, $buffer_size)
                            if ($bytes_read -gt 0) {
                                $bytes = $buffer[0..($bytes_read - 1)]
                                [System.Convert]::ToBase64String($bytes)
                            }
                            $stream.Close() > $null
                        }
                        ElseIf (Test-Path -Path $path -PathType Container)
                        {
                            Write-Host "[DIR]";
                        }
                        Else
                        {
                            Write-Error "$path does not exist";
                            Exit 1;
                        }
                    ''' % dict(buffer_size=buffer_size,
                               path=self._shell._escape(in_path),
                               offset=offset)
                    display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' %
                                  (in_path, out_path, offset),
                                  host=self._winrm_host)
                    cmd_parts = self._shell._encode_script(script,
                                                           as_list=True,
                                                           preserve_rc=False)
                    result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
                    if result.status_code != 0:
                        raise IOError(to_native(result.std_err))
                    if result.std_out.strip() == '[DIR]':
                        data = None
                    else:
                        data = base64.b64decode(result.std_out.strip())
                    if data is None:
                        makedirs_safe(out_path)
                        break
                    else:
                        if not out_file:
                            # If out_path is a directory and we're expecting a file, bail out now.
                            if os.path.isdir(
                                    to_bytes(out_path,
                                             errors='surrogate_or_strict')):
                                break
                            out_file = open(
                                to_bytes(out_path,
                                         errors='surrogate_or_strict'), 'wb')
                        out_file.write(data)
                        if len(data) < buffer_size:
                            break
                        offset += len(data)
                except Exception:
                    traceback.print_exc()
                    raise AnsibleError('failed to transfer file to "%s"' %
                                       to_native(out_path))
        finally:
            if out_file:
                out_file.close()
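
The fetch loop above stops when a chunk comes back shorter than buffer_size; the same offset arithmetic can be sketched locally without WinRM (remote_read is a hypothetical stand-in for the PowerShell snippet):

import base64

def reassemble(remote_read, buffer_size=2**19):
    """Sketch of the loop above; remote_read(offset, size) returns a
    base64-encoded chunk of the remote file."""
    out = b''
    offset = 0
    while True:
        data = base64.b64decode(remote_read(offset, buffer_size))
        out += data
        if len(data) < buffer_size:  # short read means end of file
            return out
        offset += len(data)

# Quick check against an in-memory payload:
payload = b'x' * (2**19 + 100)
fake_read = lambda off, size: base64.b64encode(payload[off:off + size])
assert reassemble(fake_read) == payload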
Example #26
    def _fixup_perms2(self, remote_paths, remote_user, execute=True):
        """
        We need the files we upload to be readable (and sometimes executable)
        by the user being sudo'd to but we want to limit other people's access
        (because the files could contain passwords or other private
        information.  We achieve this in one of these ways:

        * If no sudo is performed or the remote_user is sudo'ing to
          themselves, we don't have to change permissions.
        * If the remote_user sudo's to a privileged user (for instance,
          root), we don't have to change permissions.
        * If the remote_user sudo's to an unprivileged user, then we attempt
          to grant the unprivileged user access via file system acls.
        * If granting file system acls fails, we try to change the owner of
          the file with chown, which only works if the remote_user is
          privileged or the remote system allows chown calls by unprivileged
          users (e.g. HP-UX).
        * If the chown fails, we can set the file to be world readable so
          that the unprivileged become user can read the file. Since this
          could allow other users to get access to private information, we
          only do this if Ansible is configured with
          "allow_world_readable_tmpfiles" in the ansible.cfg.
        """
        if self._connection._shell.SHELL_FAMILY == 'powershell':
            # This won't work on Powershell as-is, so we'll just completely skip until
            # we have a need for it, at which point we'll have to do something different.
            return remote_paths

        if self._play_context.become and self._play_context.become_user not in ('root', remote_user):
            # Unprivileged user that's different than the ssh user.  Let's get
            # to work!

            # Try to use file system acls to make the files readable for sudo'd
            # user
            if execute:
                chmod_mode = 'rx'
                setfacl_mode = 'r-x'
            else:
                chmod_mode = 'rX'
                ### Note: this form fails silently on freebsd.  We currently
                # never call _fixup_perms2() with execute=False but if we
                # start to we'll have to fix this.
                setfacl_mode = 'r-X'

            res = self._remote_set_user_facl(remote_paths, self._play_context.become_user, setfacl_mode)
            if res['rc'] != 0:
                # File system acls failed; let's try to use chown next
                # Set executable bit first as on some systems an
                # unprivileged user can use chown
                if execute:
                    res = self._remote_chmod(remote_paths, 'u+x')
                    if res['rc'] != 0:
                        raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))

                res = self._remote_chown(remote_paths, self._play_context.become_user)
                if res['rc'] != 0 and remote_user == 'root':
                    # chown failed even though remote_user is root
                    raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as root.'
                            '  Unprivileged become user would be unable to read the file.')
                elif res['rc'] != 0:
                    if C.ALLOW_WORLD_READABLE_TMPFILES:
                        # chown and fs acls failed -- do things this insecure
                        # way only if the user opted in in the config file
                        display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user.'
                                ' This may be insecure. For information on securing this, see'
                                ' https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
                        res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
                        if res['rc'] != 0:
                            raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
                    else:
                        raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user'
                                ' (rc: {0}, err: {1}). For information on working around this,'
                                ' see https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'.format(res['rc'], to_native(res['stderr'])))
        elif execute:
            # Can't depend on the file being transferred with execute
            # permissions.  Only need user perms because no become was
            # used here
            res = self._remote_chmod(remote_paths, 'u+x')
            if res['rc'] != 0:
                raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))

        return remote_paths
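
Expressed as the approximate shell commands tried in order, the fallback chain in the docstring looks like this (a sketch; the become user and temp path are placeholders):

# Hypothetical placeholders: 'becomeuser' and the temp file path.
fallback_attempts = [
    "setfacl -m u:becomeuser:r-x /tmp/ansible-tmp/module.py",  # 1. filesystem ACLs
    "chown becomeuser /tmp/ansible-tmp/module.py",             # 2. change owner
    "chmod a+rx /tmp/ansible-tmp/module.py",                   # 3. world-readable, opt-in only
]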
Example #27
    def _load_included_file(self, included_file, iterator, is_handler=False):
        '''
        Loads an included YAML file of tasks, applying the optional set of variables.
        '''

        display.debug("loading included file: %s" % included_file._filename)
        try:
            data = self._loader.load_from_file(included_file._filename)
            if data is None:
                return []
            elif not isinstance(data, list):
                raise AnsibleError("included task files must contain a list of tasks")

            block_list = load_list_of_blocks(
                data,
                play=included_file._task._block._play,
                parent_block=None,
                task_include=included_file._task,
                role=included_file._task._role,
                use_handlers=is_handler,
                loader=self._loader
            )

            # since we skip incrementing the stats when the task result is
            # first processed, we do so now for each host in the list
            for host in included_file._hosts:
                self._tqm._stats.increment('ok', host.name)

        except AnsibleError as e:
            # mark all of the hosts including this file as failed, send callbacks,
            # and increment the stats for this host
            for host in included_file._hosts:
                tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_unicode(e)))
                iterator.mark_host_failed(host)
                self._tqm._failed_hosts[host.name] = True
                self._tqm._stats.increment('failures', host.name)
                self._tqm.send_callback('v2_runner_on_failed', tr)
            return []

        # set the vars for this task from those specified as params to the include
        for b in block_list:
            # first make a copy of the including task, so that each has a unique copy to modify
            b._task_include = b._task_include.copy()
            # then we create a temporary set of vars to ensure the variable reference is unique
            temp_vars = b._task_include.vars.copy()
            temp_vars.update(included_file._args.copy())
            # pop tags out of the include args, if they were specified there, and assign
            # them to the include. If the include already had tags specified, we raise an
            # error so that users know not to specify them both ways
            tags = temp_vars.pop('tags', [])
            if isinstance(tags, string_types):
                tags = tags.split(',')
            if len(tags) > 0:
                if len(b._task_include.tags) > 0:
                    raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task)."
                            " Mixing tag specification styles is prohibited for the whole import hierarchy, not only for a single import statement",
                            obj=included_file._task._ds)
                display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                b._task_include.tags = tags
            b._task_include.vars = temp_vars

        # finally, send the callback and return the list of blocks loaded
        self._tqm.send_callback('v2_playbook_on_include', included_file)
        display.debug("done processing included file")
        return block_list
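
The tags handling above accepts either a list or a comma-separated string in the include args; a condensed sketch of that normalization:

# Condensed from the loop above; str stands in for string_types.
temp_vars = {'tags': 'web,db'}      # tags passed via include args
tags = temp_vars.pop('tags', [])
if isinstance(tags, str):
    tags = tags.split(',')
assert tags == ['web', 'db']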
Example #28
    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        '''
        if task_vars is None:
            task_vars = dict()

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True
        else:
            module_args['_ansible_check_mode'] = False

        # Get the connection user for permission checks
        remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

        # set no log in the module arguments, if required
        module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG

        # set debug in the module arguments, if required
        module_args['_ansible_debug'] = C.DEFAULT_DEBUG

        # let module know we are in diff mode
        module_args['_ansible_diff'] = self._play_context.diff

        # let module know our verbosity
        module_args['_ansible_verbosity'] = display.verbosity

        # give the module information about the ansible version
        module_args['_ansible_version'] = __version__

        # give the module information about its name
        module_args['_ansible_module_name'] = module_name

        # set the syslog facility to be used in the module
        module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)

        # let module know about filesystems that selinux treats specially
        module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS

        (module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        display.vvv("Using module file %s" % module_path)
        if not shebang and module_style != 'binary':
            raise AnsibleError("module (%s) is missing interpreter line" % module_name)

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        args_file_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path(remote_user)

        if tmp and (module_style != 'new' or
                    not self._connection.has_pipelining or
                    not self._play_context.pipelining or
                    C.DEFAULT_KEEP_REMOTE_FILES or
                    self._play_context.become_method == 'su'):
            remote_module_filename = self._connection._shell.get_remote_filename(module_path)
            remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
            if module_style in ('old', 'non_native_want_json', 'binary'):
                # we'll also need a temp file to hold our module arguments
                args_file_path = self._connection._shell.join_path(tmp, 'args')

        if remote_module_path or module_style != 'new':
            display.debug("transferring module to remote %s" % remote_module_path)
            if module_style == 'binary':
                self._transfer_file(module_path, remote_module_path)
            else:
                self._transfer_data(remote_module_path, module_data)
            if module_style == 'old':
                # we need to dump the module args to a k=v string in a file on
                # the remote system, which can be read and parsed by the module
                args_data = ""
                for k, v in iteritems(module_args):
                    args_data += '%s=%s ' % (k, pipes.quote(text_type(v)))
                self._transfer_data(args_file_path, args_data)
            elif module_style in ('non_native_want_json', 'binary'):
                self._transfer_data(args_file_path, json.dumps(module_args))
            display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        remote_files = None

        if args_file_path:
            remote_files = tmp, remote_module_path, args_file_path
        elif remote_module_path:
            remote_files = tmp, remote_module_path

        # Fix permissions of the tmp path and tmp files.  This should be
        # called after all files have been transferred.
        if remote_files:
            self._fixup_perms2(remote_files, remote_user)

        cmd = ""
        in_data = None

        if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                tmp_rm_cmd = self._connection._shell.remove(tmp, recurse=True)
                tmp_rm_res = self._low_level_execute_command(tmp_rm_cmd, sudoable=False)
                tmp_rm_data = self._parse_returned_data(tmp_rm_res)
                if tmp_rm_data.get('rc', 0) != 0:
                    display.warning('Error deleting remote temporary files (rc: {0}, stderr: {1})'.format(tmp_rm_res.get('rc'),
                        tmp_rm_res.get('stderr', 'No error string available.')))

        # parse the main result
        data = self._parse_returned_data(res)

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', u'').splitlines()

        display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data
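
For the 'old' module style, arguments travel to the remote host as a shell-quoted k=v string in a separate args file; a sketch mirroring the loop above (shlex.quote stands in for the older pipes.quote):

from shlex import quote

module_args = {'state': 'present', 'name': 'httpd'}
args_data = ''.join('%s=%s ' % (k, quote(str(v))) for k, v in module_args.items())
# args_data == "state=present name=httpd "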
Example #29
def enable_mode(func):
    # The snippet originally began at `def wrapped(...)`, leaving `func`
    # undefined; the enclosing decorator is restored here (the name
    # enable_mode is an assumption) so the fragment is self-contained.
    def wrapped(self, *args, **kwargs):
        prompt = self._connection.get_prompt()
        if not to_text(prompt, errors='surrogate_or_strict').strip().endswith('#'):
            raise AnsibleError('operation requires privilege escalation')
        return func(self, *args, **kwargs)
    return wrapped
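
A hedged usage sketch of the decorator above (the class and method names are hypothetical):

class CliconfExample:
    def __init__(self, connection):
        self._connection = connection

    @enable_mode
    def get_running_config(self):
        # Only reachable when the prompt ends in '#' (privileged mode).
        return self._connection.send_command('show running-config')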
Example #30
    def _check_conditional(self, conditional, templar, all_vars):
        '''
        This method does the low-level evaluation of each conditional
        set on this object, using jinja2 to wrap the conditionals for
        evaluation.
        '''

        original = conditional
        if conditional is None or conditional == '':
            return True

        if conditional in all_vars and re.match("^[_A-Za-z][_a-zA-Z0-9]*$",
                                                conditional):
            conditional = all_vars[conditional]

        # make sure the templar is using the variables specified with this method
        templar.set_available_variables(variables=all_vars)

        try:
            conditional = templar.template(conditional)
            if not isinstance(conditional, text_type) or conditional == "":
                return conditional

            # a Jinja2 evaluation that results in something Python can eval!
            disable_lookups = False
            if hasattr(conditional, '__UNSAFE__'):
                disable_lookups = True

            presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
            val = templar.template(presented,
                                   disable_lookups=disable_lookups).strip()
            if val == "True":
                return True
            elif val == "False":
                return False
            else:
                raise AnsibleError("unable to evaluate conditional: %s" %
                                   original)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            # the templating failed, meaning most likely a variable was undefined. If we happened to be
            # looking for an undefined variable, return True, otherwise fail
            try:
                # first we extract the variable name from the error message
                var_name = re.compile(
                    r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(
                        str(e)).groups()[0]
                # next we extract all defined/undefined tests from the conditional string
                def_undef = self.extract_defined_undefined(conditional)
                # then we loop through these, comparing the error variable name against
                # each def/undef test we found above. If there is a match, we determine
                # whether the logic/state mean the variable should exist or not and return
                # the corresponding True/False
                for (du_var, logic, state) in def_undef:
                    # when we compare the var names, normalize quotes because something
                    # like hostvars['foo'] may be tested against hostvars["foo"]
                    if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # the should exist is a xor test between a negation in the logic portion
                        # against the state (defined or undefined)
                        should_exist = ('not' in logic) != (state == 'defined')
                        return not should_exist
                # as nothing above matched the failed var name, re-raise here to
                # trigger the AnsibleUndefinedVariable exception again below
                raise
            except Exception as new_e:
                raise AnsibleUndefinedVariable(
                    "error while evaluating conditional (%s): %s" %
                    (original, e))
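
The double templating above first resolves the conditional expression, then wraps it in a trivial Jinja if/else to force a True/False string; a standalone sketch of that second step with plain jinja2:

from jinja2 import Environment

conditional = "ansible_distribution == 'CentOS'"
variables = {'ansible_distribution': 'CentOS'}

presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
val = Environment().from_string(presented).render(**variables).strip()
assert val == 'True'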
Example #31
    def test_basic_unicode_error(self):
        e = AnsibleError(self.unicode_message)
        self.assertEqual(e.message, 'ERROR! ' + self.unicode_message)
        self.assertEqual(e.__repr__(), 'ERROR! ' + self.unicode_message)
Example #32
    def set_host_overrides(self, host, hostvars=None):
        '''
        Override WinRM-specific options from host variables.
        '''
        self._winrm_host = self._play_context.remote_addr
        self._winrm_port = int(self._play_context.port or 5986)
        self._winrm_scheme = hostvars.get(
            'ansible_winrm_scheme',
            'http' if self._winrm_port == 5985 else 'https')
        self._winrm_path = hostvars.get('ansible_winrm_path', '/wsman')
        self._winrm_user = self._play_context.remote_user
        self._winrm_pass = self._play_context.password

        self._kinit_cmd = hostvars.get('ansible_winrm_kinit_cmd', 'kinit')

        if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
            self._winrm_supported_authtypes = set(
                winrm.FEATURE_SUPPORTED_AUTHTYPES)
        else:
            # for legacy versions of pywinrm, use the values we know are supported
            self._winrm_supported_authtypes = set(
                ['plaintext', 'ssl', 'kerberos'])

        # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
        transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'

        if HAVE_KERBEROS and self._winrm_user and '@' in self._winrm_user:
            self._winrm_transport = 'kerberos,%s' % transport_selector
        else:
            self._winrm_transport = transport_selector
        self._winrm_transport = hostvars.get('ansible_winrm_transport',
                                             self._winrm_transport)
        if isinstance(self._winrm_transport, string_types):
            self._winrm_transport = [
                x.strip() for x in self._winrm_transport.split(',')
                if x.strip()
            ]

        unsupported_transports = set(self._winrm_transport).difference(
            self._winrm_supported_authtypes)

        if unsupported_transports:
            raise AnsibleError(
                'The installed version of WinRM does not support transport(s) %s'
                % list(unsupported_transports))

        # if kerberos is among our transports and there's a password specified, we're managing the tickets
        kinit_mode = str(hostvars.get('ansible_winrm_kinit_mode', '')).strip()
        if kinit_mode == "":
            # HACK: ideally, remove multi-transport stuff
            self._kerb_managed = "kerberos" in self._winrm_transport and self._winrm_pass
        elif kinit_mode == "managed":
            self._kerb_managed = True
        elif kinit_mode == "manual":
            self._kerb_managed = False
        else:
            raise AnsibleError('Unknown ansible_winrm_kinit_mode value: %s' %
                               kinit_mode)

        # arg names we're going to pass through directly
        internal_kwarg_mask = set([
            'self', 'endpoint', 'transport', 'username', 'password', 'scheme',
            'path'
        ])

        self._winrm_kwargs = dict(username=self._winrm_user,
                                  password=self._winrm_pass)
        argspec = inspect.getargspec(Protocol.__init__)
        supported_winrm_args = set(argspec.args)
        supported_winrm_args.update(internal_kwarg_mask)
        passed_winrm_args = set([
            v.replace('ansible_winrm_', '') for v in hostvars
            if v.startswith('ansible_winrm_')
        ])
        unsupported_args = passed_winrm_args.difference(supported_winrm_args)

        # warn for kwargs unsupported by the installed version of pywinrm
        for arg in unsupported_args:
            display.warning(
                "ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)"
                .format(arg))

        # pass through matching kwargs, excluding the list we want to treat specially
        for arg in passed_winrm_args.difference(
                internal_kwarg_mask).intersection(supported_winrm_args):
            self._winrm_kwargs[arg] = hostvars['ansible_winrm_%s' % arg]
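
The passthrough at the end maps any supported ansible_winrm_* hostvar onto the same-named pywinrm Protocol kwarg; a condensed sketch with made-up hostvars (read_timeout_sec and operation_timeout_sec are real Protocol parameters):

hostvars = {
    'ansible_winrm_read_timeout_sec': 70,
    'ansible_winrm_operation_timeout_sec': 60,
    'ansible_host': 'win1.example.com',
}
supported = {'read_timeout_sec', 'operation_timeout_sec'}  # from inspecting Protocol.__init__
winrm_kwargs = {
    k.replace('ansible_winrm_', ''): v
    for k, v in hostvars.items()
    if k.startswith('ansible_winrm_') and k.replace('ansible_winrm_', '') in supported
}
# -> {'read_timeout_sec': 70, 'operation_timeout_sec': 60}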