Example #1
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        # task_vars = self._job_vars.copy()
        task_vars = self._job_vars

        loop_var = 'item'
        index_var = None
        label = None
        loop_pause = 0
        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)

        # FIXME: move this to the object itself to allow post_validate to take care of templating (loop_control.post_validate)
        if self._task.loop_control:
            loop_var = templar.template(self._task.loop_control.loop_var)
            index_var = templar.template(self._task.loop_control.index_var)
            loop_pause = templar.template(self._task.loop_control.pause)

            # This may be 'None', so it is templated below after we ensure a value and an item is assigned
            label = self._task.loop_control.label

        # ensure we always have a label
        if label is None:
            label = '{{' + loop_var + '}}'

        if loop_var in task_vars:
            display.warning(u"The loop variable '%s' is already in use. "
                            u"You should set the `loop_var` value in the `loop_control` option for the task"
                            u" to something else to avoid variable collisions and unexpected behavior." % loop_var)

        ran_once = False
        if self._task.loop_with:
            # Only squash with 'with_:' not with the 'loop:', 'magic' squashing can be removed once with_ loops are deprecated
            items = self._squash_items(items, loop_var, task_vars)

        for item_index, item in enumerate(items):
            task_vars[loop_var] = item
            if index_var:
                task_vars[index_var] = item_index

            # Update template vars to reflect current loop iteration
            templar.set_available_variables(task_vars)

            # pause between loop iterations
            if loop_pause and ran_once:
                try:
                    time.sleep(float(loop_pause))
                except ValueError as e:
                    raise AnsibleError('Invalid pause value: %s, produced error: %s' % (loop_pause, to_native(e)))
            else:
                ran_once = True

            try:
                tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
                tmp_task._parent = self._task._parent
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=to_text(e)))
                continue

            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
            res = self._execute(variables=task_vars)
            task_fields = self._task.dump_attrs()
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

            # now update the result with the item info, and append the result
            # to the list of results
            res[loop_var] = item
            if index_var:
                res[index_var] = item_index
            res['_ansible_item_result'] = True
            res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')

            # templated here, unlike the rest of the loop_control fields, because it depends on loop_var being set above
            res['_ansible_item_label'] = templar.template(label, cache=False)

            self._rslt_q.put(
                TaskResult(
                    self._host.name,
                    self._task._uuid,
                    res,
                    task_fields=task_fields,
                ),
                block=False,
            )
            results.append(res)
            del task_vars[loop_var]

        return results
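
The loop body above swaps a per-item copy of the task and play context in, runs it, and then decorates each result with the item, index, and label before queueing it. Below is a stripped-down sketch of that per-item bookkeeping; run_loop_sketch and execute are hypothetical stand-ins for illustration, not Ansible APIs, and the templating, pausing, and queueing steps are omitted.

def run_loop_sketch(items, execute, loop_var='item', index_var=None):
    # execute(task_vars) -> dict stands in for self._execute(variables=task_vars)
    results = []
    task_vars = {}
    for item_index, item in enumerate(items):
        task_vars[loop_var] = item                  # expose the current item to the task
        if index_var:
            task_vars[index_var] = item_index
        res = execute(dict(task_vars))              # run one iteration
        res[loop_var] = item                        # echo the item back into its result
        if index_var:
            res[index_var] = item_index
        res['_ansible_item_result'] = True          # mark this as a per-item result
        results.append(res)
        del task_vars[loop_var]                     # avoid leaking into the next iteration
    return results

# e.g. run_loop_sketch(['a', 'b'], lambda v: {'changed': True}, index_var='idx')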
Example #2
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)

            # FIXME: update connection/shell plugin options
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation is False, skipping this task")
                return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
        except AnsibleError:
            # loop error takes precedence
            if self._loop_eval_error is not None:
                raise self._loop_eval_error  # pylint: disable=raising-bad-type
            raise

        # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
        if self._loop_eval_error is not None:
            raise self._loop_eval_error  # pylint: disable=raising-bad-type

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error  # pylint: disable=raising-bad-type

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action in ('include', 'include_tasks'):
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True, msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file, include_variables=include_variables)

        # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
        elif self._task.action == 'include_role':
            include_variables = self._task.args.copy()
            return dict(include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts",
                                   version="2.6")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        if (not self._connection or
                not getattr(self._connection, 'connected', False) or
                self._play_context.remote_addr != self._connection._play_context.remote_addr):
            self._connection = self._get_connection(variables=variables, templar=templar)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        self._set_connection_options(variables, templar)
        self._set_shell_options(variables, templar)

        # get handler
        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # Apply default params for action/module, if present
        # These are collected as a list of dicts, so we need to merge them
        module_defaults = {}
        for default in self._task.module_defaults:
            module_defaults.update(default)
        if module_defaults:
            module_defaults = templar.template(module_defaults)
        if self._task.action in module_defaults:
            tmp_args = module_defaults[self._task.action].copy()
            tmp_args.update(self._task.args)
            self._task.args = tmp_args

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = remove_omit(self._task.args, omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in range(1, retries + 1):
            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleActionSkip as e:
                return dict(skipped=True, msg=to_text(e))
            except AnsibleActionFail as e:
                return dict(failed=True, msg=to_text(e))
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_text(e))
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result)

            if self._task.async_val > 0:
                if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                    result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
                    # FIXME callback 'v2_runner_on_async_poll' here

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                if self._task.action in ('set_fact', 'include_vars'):
                    vars_copy.update(result['ansible_facts'])
                else:
                    vars_copy.update(namespace_facts(result['ansible_facts']))
                    if C.INJECT_FACTS_AS_VARS:
                        vars_copy.update(clean_facts(result['ansible_facts']))

            # set the failed property if it was missing.
            if 'failed' not in result:
                # rc is here for backwards compatibility and modules that use it instead of 'failed'
                if 'rc' in result and result['rc'] not in [0, "0"]:
                    result['failed'] = True
                else:
                    result['failed'] = False

            # Make attempts and retries available early to allow their use in changed/failed_when
            if self._task.until:
                result['attempts'] = attempt

            # set the changed property if it was missing.
            if 'changed' not in result:
                result['changed'] = False

            # re-update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            # This gives changed/failed_when access to additional recently modified
            # attributes of result
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result)

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # no conditional check, or it failed, so sleep for the specified time
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                        self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
                        time.sleep(delay)
        else:
            if retries > 1:
                # we ran out of attempts, so mark the result as failed
                result['attempts'] = retries - 1
                result['failed'] = True

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = wrap_var(result)

        if 'ansible_facts' in result:
            if self._task.action in ('set_fact', 'include_vars'):
                variables.update(result['ansible_facts'])
            else:
                variables.update(namespace_facts(result['ansible_facts']))
                if C.INJECT_FACTS_AS_VARS:
                    variables.update(clean_facts(result['ansible_facts']))

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, ie. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # add the delegated vars to the result, so we can reference them
        # on the results side without having to do any further templating
        # FIXME: we only want a limited set of variables here, so this is currently
        #        hardcoded but should be possibly fixed if we want more or if
        #        there is another source of truth we can use
        delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict()).copy()
        if len(delegated_vars) > 0:
            result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
            for k in ('ansible_host', ):
                result["_ansible_delegated_vars"][k] = delegated_vars.get(k)

        # and return
        display.debug("attempt loop complete, returning result")
        return result
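
The module_defaults handling above collects defaults as a list of dicts, merges them in order, and lets the task's explicit arguments win on conflict. A minimal sketch of that merge follows; apply_module_defaults is a hypothetical helper, not part of Ansible, and the templating applied to the merged defaults is omitted.

def apply_module_defaults(action, module_defaults, task_args):
    # module_defaults is a list of dicts keyed by action name; later entries override earlier ones
    merged = {}
    for default in module_defaults:
        merged.update(default)
    if action in merged:
        args = dict(merged[action])   # start from the defaults declared for this action
        args.update(task_args)        # explicit task args take precedence
        return args
    return dict(task_args)

# apply_module_defaults('uri', [{'uri': {'timeout': 30}}], {'url': 'http://example.com'})
# -> {'timeout': 30, 'url': 'http://example.com'}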
Example #3
    def send_result(self, result, host, task):
        debug("done running TaskExecutor() for %s/%s" % (host, task))
        res = TaskResult(host, task, result)
        self.send(res)
Example #4
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        # task_vars = self._job_vars.copy()
        task_vars = self._job_vars

        loop_var = 'item'
        index_var = None
        label = None
        loop_pause = 0
        if self._task.loop_control:
            loop_var = self._task.loop_control.loop_var
            index_var = self._task.loop_control.index_var
            loop_pause = self._task.loop_control.pause
            # these may be 'None', so we still need to default to something useful
            label = self._task.loop_control.label or ('{{' + loop_var + '}}')

        if loop_var in task_vars:
            display.warning(u"The loop variable '%s' is already in use. "
                            u"You should set the `loop_var` value in the `loop_control` option for the task"
                            u" to something else to avoid variable collisions and unexpected behavior." % loop_var)

        ran_once = False
        if self._task.loop_with:
            # Only squash with 'with_:' not with the 'loop:', 'magic' squashing can be removed once with_ loops are deprecated
            items = self._squash_items(items, loop_var, task_vars)

        for item_index, item in enumerate(items):
            task_vars[loop_var] = item
            if index_var:
                task_vars[index_var] = item_index

            # pause between loop iterations
            if loop_pause and ran_once:
                time.sleep(loop_pause)
            else:
                ran_once = True

            try:
                tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
                tmp_task._parent = self._task._parent
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=to_text(e)))
                continue

            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
            res = self._execute(variables=task_vars)
            task_fields = self._task.dump_attrs()
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

            # now update the result with the item info, and append the result
            # to the list of results
            res[loop_var] = item
            if index_var:
                res[index_var] = item_index
            res['_ansible_item_result'] = True
            res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')

            if label is not None:
                templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
                res['_ansible_item_label'] = templar.template(label)

            self._rslt_q.put(
                TaskResult(
                    self._host.name,
                    self._task._uuid,
                    res,
                    task_fields=task_fields,
                ),
                block=False,
            )
            results.append(res)
            del task_vars[loop_var]

        return results
Example #5
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader,
                          shared_loader_obj=self._shared_loader_obj,
                          variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(
                task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation failed, skipping this task")
                return dict(changed=False,
                            skipped=True,
                            skip_reason='Conditional check failed',
                            _ansible_no_log=self._play_context.no_log)
        except AnsibleError:
            # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
            if self._task.action in ['include', 'include_role']:
                raise

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = self._task.args.copy()
            include_file = include_variables.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True,
                            msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file,
                        include_variables=include_variables)

        #TODO: not needed?
        # if this task is an IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
        elif self._task.action == 'include_role':
            include_variables = self._task.args.copy()
            role = templar.template(self._task._role_name)
            if not role:
                return dict(failed=True,
                            msg="No role was specified to include")
            return dict(include_role=role, include_variables=include_variables)

        # Now we do final validation on the task, which sets all fields to their final values.
        self._task.post_validate(templar=templar)
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                display.deprecated(
                    "Using variables for task params is unsafe, especially if the variables come from an external source like facts"
                )
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        if (not self._connection or
                not getattr(self._connection, 'connected', False) or
                self._play_context.remote_addr != self._connection._play_context.remote_addr):
            self._connection = self._get_connection(variables=variables,
                                                    templar=templar)
            self._connection.set_host_overrides(
                host=self._host,
                hostvars=variables.get('hostvars',
                                       {}).get(self._host.name, {}))
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        self._handler = self._get_action_handler(connection=self._connection,
                                                 templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict((i[0], i[1])
                                   for i in iteritems(self._task.args)
                                   if i[1] != omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in range(1, retries + 1):
            display.debug("running the handler")
            try:
                result = self._handler.run(task_vars=variables)
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_unicode(e))
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = wrap_var(result.copy())

            if self._task.async > 0:
                if self._task.poll > 0:
                    result = self._poll_async_result(result=result,
                                                     templar=templar,
                                                     task_vars=vars_copy)

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(
                        templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(
                        templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # set the failed property if the result has a non-zero rc. This will be
            # overridden below if the failed_when property is set
            if result.get('rc', 0) != 0:
                result['failed'] = True

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # no conditional check, or it failed, so sleep for the specified time
                    if attempt < retries:
                        result['attempts'] = attempt
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' %
                                      (attempt, retries))
                        self._rslt_q.put(TaskResult(self._host.name,
                                                    self._task._uuid, result),
                                         block=False)
                        time.sleep(delay)
Example #6
    def _execute_meta(self, task, play_context, iterator, target_host):

        # meta tasks store their args in the _raw_params field of args,
        # since they do not use k=v pairs, so get that
        meta_action = task.args.get('_raw_params')

        # FIXME(s):
        # * raise an error or show a warning when a conditional is used
        #   on a meta task that doesn't support them

        def _evaluate_conditional(h):
            all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
            templar = Templar(loader=self._loader, variables=all_vars)
            return task.evaluate_conditional(templar, all_vars)

        skipped = False
        msg = ''
        if meta_action == 'noop':
            # FIXME: issue a callback for the noop here?
            msg = "noop"
        elif meta_action == 'flush_handlers':
            self.run_handlers(iterator, play_context)
            msg = "ran handlers"
        elif meta_action == 'refresh_inventory':
            self._inventory.refresh_inventory()
            msg = "inventory successfully refreshed"
        elif meta_action == 'clear_facts':
            if _evaluate_conditional(target_host):
                for host in self._inventory.get_hosts(iterator._play.hosts):
                    self._variable_manager.clear_facts(host)
                msg = "facts cleared"
            else:
                skipped = True
        elif meta_action == 'clear_host_errors':
            if _evaluate_conditional(target_host):
                for host in self._inventory.get_hosts(iterator._play.hosts):
                    self._tqm._failed_hosts.pop(host.name, False)
                    self._tqm._unreachable_hosts.pop(host.name, False)
                    iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
                msg = "cleared host errors"
            else:
                skipped = True
        elif meta_action == 'end_play':
            if _evaluate_conditional(target_host):
                for host in self._inventory.get_hosts(iterator._play.hosts):
                    if host.name not in self._tqm._unreachable_hosts:
                        iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
                msg = "ending play"
        elif meta_action == 'reset_connection':
            connection = connection_loader.get(play_context.connection, play_context, os.devnull)
            if connection:
                connection.reset()
                msg = 'reset connection'
            else:
                msg = 'no connection, nothing to reset'
        else:
            raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

        result = {'msg': msg}
        if skipped:
            result['skipped'] = True
        else:
            result['changed'] = False

        display.vv("META: %s" % msg)

        return [TaskResult(target_host, task, result)]
Example #7
    def test_strategy_base_run_handlers(self, mock_worker):
        def fake_run(*args):
            return

        mock_worker.side_effect = fake_run
        mock_play_context = MagicMock()

        mock_handler_task = MagicMock(Handler)
        mock_handler_task.action = 'foo'
        mock_handler_task.get_name.return_value = "test handler"
        mock_handler_task.has_triggered.return_value = False
        mock_handler_task.listen = None
        mock_handler_task._role = None

        mock_handler = MagicMock()
        mock_handler.block = [mock_handler_task]
        mock_handler.flag_for_host.return_value = False

        mock_play = MagicMock()
        mock_play.handlers = [mock_handler]

        mock_host = MagicMock(Host)
        mock_host.name = "test01"
        mock_host.has_hostkey = True

        mock_inventory = MagicMock()
        mock_inventory.get_hosts.return_value = [mock_host]

        mock_var_mgr = MagicMock()
        mock_var_mgr.get_vars.return_value = dict()

        mock_iterator = MagicMock()
        mock_iterator._play = mock_play
        mock_iterator.get_original_task.return_value = mock_handler_task

        fake_loader = DictDataLoader()
        mock_options = MagicMock()
        mock_options.module_path = None

        tqm = TaskQueueManager(
            inventory=mock_inventory,
            variable_manager=mock_var_mgr,
            loader=fake_loader,
            options=mock_options,
            passwords=None,
        )
        tqm._initialize_processes(3)
        tqm._initialize_notified_handlers(mock_play)
        tqm.hostvars = dict()

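        # queue one completed handler result, then drive run_handlers through the mocked play/iterator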
        try:
            strategy_base = StrategyBase(tqm=tqm)

            strategy_base._inventory = mock_inventory
            strategy_base._notified_handlers = {mock_handler_task: [mock_host]}

            task_result = TaskResult(Host('host01'), Handler(),
                                     dict(changed=False))
            tqm._final_q.put(task_result)

            result = strategy_base.run_handlers(iterator=mock_iterator,
                                                play_context=mock_play_context)
        finally:
            strategy_base.cleanup()
            tqm.cleanup()
Example #8
    def _poll_async_result(self, result, templar, task_vars=None):
        '''
        Polls for the specified JID to be complete
        '''

        if task_vars is None:
            task_vars = self._job_vars

        async_jid = result.get('ansible_job_id')
        if async_jid is None:
            return dict(failed=True, msg="No job id was returned by the async task")

        # Create a new pseudo-task to run the async_status module, and run
        # that (with a sleep for "poll" seconds between each retry) until the
        # async time limit is exceeded.

        async_task = Task().load(dict(action='async_status jid=%s' % async_jid, environment=self._task.environment))

        # FIXME: this is no longer the case, normal takes care of all, see if this can just be generalized
        # Because this is an async task, the action handler is async. However,
        # we need the 'normal' action handler for the status check, so get it
        # now via the action_loader
        async_handler = self._shared_loader_obj.action_loader.get(
            'ansible.legacy.async_status',
            task=async_task,
            connection=self._connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=templar,
            shared_loader_obj=self._shared_loader_obj,
        )

        time_left = self._task.async_val
        while time_left > 0:
            time.sleep(self._task.poll)

            try:
                async_result = async_handler.run(task_vars=task_vars)
                # We do not bail out of the loop in cases where the failure
                # is associated with a parsing error. The async_runner can
                # have issues which result in a half-written/unparseable result
                # file on disk, which manifests to the user as a timeout happening
                # before it's time to timeout.
                if (int(async_result.get('finished', 0)) == 1 or
                        ('failed' in async_result and async_result.get('_ansible_parsed', False)) or
                        'skipped' in async_result):
                    break
            except Exception as e:
                # Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
                # On an exception, call the connection's reset method if it has one
                # (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
                display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
                display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
                try:
                    async_handler._connection.reset()
                except AttributeError:
                    pass

                # Little hack to raise the exception if we've exhausted the timeout period
                time_left -= self._task.poll
                if time_left <= 0:
                    raise
            else:
                time_left -= self._task.poll
                self._final_q.send_callback(
                    'v2_runner_on_async_poll',
                    TaskResult(
                        self._host,
                        async_task,
                        async_result,
                        task_fields=self._task.dump_attrs(),
                    ),
                )

        if int(async_result.get('finished', 0)) != 1:
            if async_result.get('_ansible_parsed'):
                return dict(failed=True, msg="async task did not complete within the requested time - %ss" % self._task.async_val)
            else:
                return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
        else:
            async_handler.cleanup(force=True)
            return async_result
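
The poll loop above spends a fixed budget of async_val seconds, sleeping poll seconds between status checks, and only lets a polling exception propagate once that budget is exhausted. Here is a simplified sketch of that time accounting; poll_until_finished and check are hypothetical names, and the callback dispatch and result-parsing checks are omitted.

import time

def poll_until_finished(check, async_timeout, poll_interval):
    # check() -> dict stands in for async_handler.run(task_vars=...)
    async_result = {}
    time_left = async_timeout
    while time_left > 0:
        time.sleep(poll_interval)
        time_left -= poll_interval
        try:
            async_result = check()
        except Exception:
            if time_left <= 0:
                raise              # budget exhausted, surface the polling error
            continue               # transient failure, keep polling
        if int(async_result.get('finished', 0)) == 1:
            break                  # async job reported completion
    return async_result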
Example #9
    def run(self):
        '''
        Called when the process is started, and loops indefinitely
        until an error is encountered (typically an IOError from the
        queue pipe being disconnected). During the loop, we attempt
        to pull tasks off the job queue and run them, pushing the result
        onto the results queue. We also remove the host from the blocked
        hosts list, to signify that it is ready for its next task.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            task = None
            try:
                (host, task, basedir, job_vars, play_context,
                 shared_loader_obj) = self._main_q.get()
                debug("there's work to be done!")
                debug("got a task/handler to work on: %s" % task)

                # because the task queue manager starts workers (forks) before the
                # playbook is loaded, set the basedir of the loader inherited by
                # this fork now so that we can find files correctly
                self._loader.set_basedir(basedir)

                # Serializing/deserializing tasks does not preserve the loader attribute,
                # since it is passed to the worker during the forking of the process and
                # would be wasteful to serialize. So we set it here on the task now, and
                # the task handles updating parent/child objects as needed.
                task.set_loader(self._loader)

                # execute the task and build a TaskResult from the result
                debug("running TaskExecutor() for %s/%s" % (host, task))
                executor_result = TaskExecutor(host, task, job_vars,
                                               play_context, self._new_stdin,
                                               self._loader,
                                               shared_loader_obj).run()
                debug("done running TaskExecutor() for %s/%s" % (host, task))
                task_result = TaskResult(host, task, executor_result)

                # put the result on the result queue
                debug("sending task result")
                self._rslt_q.put(task_result)
                debug("done sending task result")

            except queue.Empty:
                pass
            except AnsibleConnectionFailure:
                try:
                    if task:
                        task_result = TaskResult(host, task,
                                                 dict(unreachable=True))
                        self._rslt_q.put(task_result, block=False)
                except:
                    break
            except Exception as e:
                if isinstance(e, (IOError, EOFError, KeyboardInterrupt)) and not isinstance(e, TemplateNotFound):
                    break
                else:
                    try:
                        if task:
                            task_result = TaskResult(
                                host, task,
                                dict(failed=True,
                                     exception=traceback.format_exc(),
                                     stdout=''))
                            self._rslt_q.put(task_result, block=False)
                    except:
                        debug("WORKER EXCEPTION: %s" % e)
                        debug("WORKER EXCEPTION: %s" % traceback.format_exc())
                        break

        debug("WORKER PROCESS EXITING")
Example #10
    def run(self):
        '''
        Called when the process is started, and loops indefinitely
        until an error is encountered (typically an IOError from the
        queue pipe being disconnected). During the loop, we attempt
        to pull tasks off the job queue and run them, pushing the result
        onto the results queue. We also remove the host from the blocked
        hosts list, to signify that it is ready for its next task.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            task = None
            try:
                if not self._main_q.empty():
                    debug("there's work to be done!")
                    (host, task, basedir, job_vars, play_context, shared_loader_obj) = self._main_q.get(block=False)
                    debug("got a task/handler to work on: %s" % task)

                    # because the task queue manager starts workers (forks) before the
                    # playbook is loaded, set the basedir of the loader inherited by
                    # this fork now so that we can find files correctly
                    self._loader.set_basedir(basedir)

                    # Serializing/deserializing tasks does not preserve the loader attribute,
                    # since it is passed to the worker during the forking of the process and
                    # would be wasteful to serialize. So we set it here on the task now, and
                    # the task handles updating parent/child objects as needed.
                    task.set_loader(self._loader)

                    # apply the given task's information to the connection info,
                    # which may override some fields already set by the play or
                    # the options specified on the command line
                    new_play_context = play_context.set_task_and_host_override(task=task, host=host)

                    # execute the task and build a TaskResult from the result
                    debug("running TaskExecutor() for %s/%s" % (host, task))
                    executor_result = TaskExecutor(host, task, job_vars, new_play_context, self._new_stdin, self._loader, shared_loader_obj).run()
                    debug("done running TaskExecutor() for %s/%s" % (host, task))
                    task_result = TaskResult(host, task, executor_result)

                    # put the result on the result queue
                    debug("sending task result")
                    self._rslt_q.put(task_result, block=False)
                    debug("done sending task result")

                else:
                    time.sleep(0.1)

            except queue.Empty:
                pass
            except (IOError, EOFError, KeyboardInterrupt):
                break
            except AnsibleConnectionFailure:
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(unreachable=True))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break
            except Exception as e:
                debug("WORKER EXCEPTION: %s" % e)
                debug("WORKER EXCEPTION: %s" % traceback.format_exc())
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break
Example #11
    def test_strategy_base_process_pending_results(self):
        mock_tqm = MagicMock()
        mock_tqm._terminated = False
        mock_tqm._failed_hosts = dict()
        mock_tqm._unreachable_hosts = dict()
        mock_tqm.send_callback.return_value = None
        mock_tqm._notified_handlers = {}
        mock_tqm._listening_handlers = {}

        queue_items = []
        def _queue_empty(*args, **kwargs):
            return len(queue_items) == 0
        def _queue_get(*args, **kwargs):
            if len(queue_items) == 0:
                raise Queue.Empty
            else:
                return queue_items.pop()

        mock_queue = MagicMock()
        mock_queue.empty.side_effect = _queue_empty
        mock_queue.get.side_effect = _queue_get
        mock_tqm._final_q = mock_queue

        mock_tqm._stats = MagicMock()
        mock_tqm._stats.increment.return_value = None
        
        mock_play = MagicMock()

        mock_host = MagicMock()
        mock_host.name = 'test01'
        mock_host.vars = dict()
        mock_host.has_hostkey = True

        mock_task = MagicMock()
        mock_task._role = None
        mock_task.ignore_errors = False
        mock_task._uuid = uuid.uuid4()
        mock_task.loop = None

        mock_handler_task = MagicMock(Handler)
        mock_handler_task.name = 'test handler'
        mock_handler_task.action = 'foo'
        mock_handler_task.get_name.return_value = "test handler"
        mock_handler_task.has_triggered.return_value = False

        mock_iterator = MagicMock()
        mock_iterator._play = mock_play
        mock_iterator.mark_host_failed.return_value = None
        mock_iterator.get_next_task_for_host.return_value = (None, None)
        mock_iterator.get_original_task.return_value = mock_task

        mock_handler_block = MagicMock()
        mock_handler_block.block = [mock_handler_task]
        mock_handler_block.rescue = []
        mock_handler_block.always = []
        mock_play.handlers = [mock_handler_block]

        mock_tqm._notified_handlers = {mock_handler_task: []}
        mock_tqm._listening_handlers = {}

        mock_group = MagicMock()
        mock_group.add_host.return_value = None

        def _get_host(host_name):
            if host_name == 'test01':
                return mock_host
            return None
        def _get_group(group_name):
            if group_name in ('all', 'foo'):
                return mock_group
            return None

        mock_inventory = MagicMock()
        mock_inventory._hosts_cache = dict()
        mock_inventory.get_host.side_effect = _get_host
        mock_inventory.get_group.side_effect = _get_group
        mock_inventory.clear_pattern_cache.return_value = None
        mock_inventory.get_host_vars.return_value = {}

        mock_var_mgr = MagicMock()
        mock_var_mgr.set_host_variable.return_value = None
        mock_var_mgr.set_host_facts.return_value = None

        strategy_base = StrategyBase(tqm=mock_tqm)
        strategy_base._inventory = mock_inventory
        strategy_base._variable_manager = mock_var_mgr
        strategy_base._blocked_hosts = dict()

        def _has_dead_workers():
            return False            

        strategy_base._tqm.has_dead_workers = _has_dead_workers
        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 0)

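        # a normal changed result should be returned, clear the pending count, and unblock the host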
        task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True))
        queue_items.append(task_result)
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

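        # a failed result (with the iterator reporting failure) should add the host to the TQM's failed hosts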
        task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"failed":true}')
        queue_items.append(task_result)
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        mock_iterator.is_failed.return_value = True
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test01', mock_tqm._failed_hosts)
        del mock_tqm._failed_hosts['test01']
        mock_iterator.is_failed.return_value = False

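        # an unreachable result should add the host to the TQM's unreachable hosts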
        task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"unreachable": true}')
        queue_items.append(task_result)
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test01', mock_tqm._unreachable_hosts)
        del mock_tqm._unreachable_hosts['test01']

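        # a skipped result is simply returned and unblocks the host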
        task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"skipped": true}')
        queue_items.append(task_result)
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

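        # a result carrying add_host data (new host 'newhost01' in group 'foo')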
        queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

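        # a result carrying add_group data (new group 'foo')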
        queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_group=dict(group_name='foo'))))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

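        # a result with _ansible_notify should register the host with the matching notified handler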
        queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True, _ansible_notify=['test handler'])))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._process_pending_results(iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertIn(mock_handler_task, strategy_base._notified_handlers)
        self.assertIn(mock_host, strategy_base._notified_handlers[mock_handler_task])
Example #12
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        # task_vars = self._job_vars.copy()
        task_vars = self._job_vars

        loop_var = 'item'
        index_var = None
        label = None
        loop_pause = 0
        extended = False
        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)

        # FIXME: move this to the object itself to allow post_validate to take care of templating (loop_control.post_validate)
        if self._task.loop_control:
            loop_var = templar.template(self._task.loop_control.loop_var)
            index_var = templar.template(self._task.loop_control.index_var)
            loop_pause = templar.template(self._task.loop_control.pause)
            extended = templar.template(self._task.loop_control.extended)

            # This may be 'None', so it is templated below after we ensure a value and an item is assigned
            label = self._task.loop_control.label

        # ensure we always have a label
        if label is None:
            label = '{{' + loop_var + '}}'

        if loop_var in task_vars:
            display.warning(u"The loop variable '%s' is already in use. "
                            u"You should set the `loop_var` value in the `loop_control` option for the task"
                            u" to something else to avoid variable collisions and unexpected behavior." % loop_var)

        ran_once = False

        no_log = False
        items_len = len(items)
        for item_index, item in enumerate(items):
            task_vars['ansible_loop_var'] = loop_var

            task_vars[loop_var] = item
            if index_var:
                task_vars['ansible_index_var'] = index_var
                task_vars[index_var] = item_index

            if extended:
                task_vars['ansible_loop'] = {
                    'allitems': items,
                    'index': item_index + 1,
                    'index0': item_index,
                    'first': item_index == 0,
                    'last': item_index + 1 == items_len,
                    'length': items_len,
                    'revindex': items_len - item_index,
                    'revindex0': items_len - item_index - 1,
                }
                try:
                    task_vars['ansible_loop']['nextitem'] = items[item_index + 1]
                except IndexError:
                    pass
                if item_index - 1 >= 0:
                    task_vars['ansible_loop']['previtem'] = items[item_index - 1]

            # Update template vars to reflect current loop iteration
            templar.available_variables = task_vars

            # pause between loop iterations
            if loop_pause and ran_once:
                try:
                    time.sleep(float(loop_pause))
                except ValueError as e:
                    raise AnsibleError('Invalid pause value: %s, produced error: %s' % (loop_pause, to_native(e)))
            else:
                ran_once = True

            try:
                tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
                tmp_task._parent = self._task._parent
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=to_text(e)))
                continue

            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
            res = self._execute(variables=task_vars)
            task_fields = self._task.dump_attrs()
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)

            # update 'general no_log' based on specific no_log
            no_log = no_log or tmp_task.no_log

            # now update the result with the item info, and append the result
            # to the list of results
            res[loop_var] = item
            res['ansible_loop_var'] = loop_var
            if index_var:
                res[index_var] = item_index
                res['ansible_index_var'] = index_var
            if extended:
                res['ansible_loop'] = task_vars['ansible_loop']

            res['_ansible_item_result'] = True
            res['_ansible_ignore_errors'] = task_fields.get('ignore_errors')

            # the label gets templated here, unlike the rest of the loop_control fields, because it depends on loop_var being set above
            try:
                res['_ansible_item_label'] = templar.template(label, cache=False)
            except AnsibleUndefinedVariable as e:
                res.update({
                    'failed': True,
                    'msg': 'Failed to template loop_control.label: %s' % to_text(e)
                })

            self._final_q.put(
                TaskResult(
                    self._host.name,
                    self._task._uuid,
                    res,
                    task_fields=task_fields,
                ),
                block=False,
            )
            results.append(res)
            del task_vars[loop_var]

            # clear 'connection related' plugin variables for next iteration
            if self._connection:
                clear_plugins = {
                    'connection': self._connection._load_name,
                    'shell': self._connection._shell._load_name
                }
                if self._connection.become:
                    clear_plugins['become'] = self._connection.become._load_name

                for plugin_type, plugin_name in iteritems(clear_plugins):
                    for var in C.config.get_plugin_vars(plugin_type, plugin_name):
                        if var in task_vars and var not in self._job_vars:
                            del task_vars[var]

        self._task.no_log = no_log

        return results
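
The `ansible_loop` mapping built per iteration above can be reproduced in isolation. The following is a minimal sketch built around a hypothetical `extended_loop_info` helper; it mirrors the index/first/last/revindex arithmetic for a plain Python list and is only an illustration, not part of the executor above.

def extended_loop_info(items):
    """Yield (item, info) pairs mirroring the 'ansible_loop' dict built above."""
    items = list(items)
    length = len(items)
    for index0, item in enumerate(items):
        info = {
            'allitems': items,
            'index': index0 + 1,
            'index0': index0,
            'first': index0 == 0,
            'last': index0 + 1 == length,
            'length': length,
            'revindex': length - index0,
            'revindex0': length - index0 - 1,
        }
        if index0 + 1 < length:
            info['nextitem'] = items[index0 + 1]
        if index0 > 0:
            info['previtem'] = items[index0 - 1]
        yield item, info

# The middle of three items is neither first nor last: index 2, index0 1.
assert dict(extended_loop_info(['a', 'b', 'c']))['b'] == {
    'allitems': ['a', 'b', 'c'], 'index': 2, 'index0': 1, 'first': False,
    'last': False, 'length': 3, 'revindex': 2, 'revindex0': 1,
    'nextitem': 'c', 'previtem': 'a',
}
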
Example #13
0
    def test_host_label(self):
        result = TaskResult(host=Host('host1'), task=None, return_data={})
        self.assertEqual(CallbackBase.host_label(result), 'host1')

    def test_strategy_base_process_pending_results(self):
        mock_tqm = MagicMock()
        mock_tqm._terminated = False
        mock_tqm._failed_hosts = dict()
        mock_tqm._unreachable_hosts = dict()
        mock_tqm.send_callback.return_value = None

        queue_items = []

        def _queue_empty(*args, **kwargs):
            return len(queue_items) == 0

        def _queue_get(*args, **kwargs):
            if len(queue_items) == 0:
                raise Queue.Empty
            else:
                return queue_items.pop()

        def _queue_put(item, *args, **kwargs):
            queue_items.append(item)

        mock_queue = MagicMock()
        mock_queue.empty.side_effect = _queue_empty
        mock_queue.get.side_effect = _queue_get
        mock_queue.put.side_effect = _queue_put
        mock_tqm._final_q = mock_queue

        mock_tqm._stats = MagicMock()
        mock_tqm._stats.increment.return_value = None

        mock_play = MagicMock()

        mock_host = MagicMock()
        mock_host.name = 'test01'
        mock_host.vars = dict()
        mock_host.get_vars.return_value = dict()
        mock_host.has_hostkey = True

        mock_task = MagicMock()
        mock_task._role = None
        mock_task._parent = None
        mock_task.ignore_errors = False
        mock_task.ignore_unreachable = False
        mock_task._uuid = str(uuid.uuid4())
        mock_task.loop = None
        mock_task.copy.return_value = mock_task

        mock_handler_task = Handler()
        mock_handler_task.name = 'test handler'
        mock_handler_task.action = 'foo'
        mock_handler_task._parent = None
        mock_handler_task._uuid = 'xxxxxxxxxxxxx'

        mock_iterator = MagicMock()
        mock_iterator._play = mock_play
        mock_iterator.mark_host_failed.return_value = None
        mock_iterator.get_next_task_for_host.return_value = (None, None)

        mock_handler_block = MagicMock()
        mock_handler_block.block = [mock_handler_task]
        mock_handler_block.rescue = []
        mock_handler_block.always = []
        mock_play.handlers = [mock_handler_block]

        mock_group = MagicMock()
        mock_group.add_host.return_value = None

        def _get_host(host_name):
            if host_name == 'test01':
                return mock_host
            return None

        def _get_group(group_name):
            if group_name in ('all', 'foo'):
                return mock_group
            return None

        mock_inventory = MagicMock()
        mock_inventory._hosts_cache = dict()
        mock_inventory.hosts.return_value = mock_host
        mock_inventory.get_host.side_effect = _get_host
        mock_inventory.get_group.side_effect = _get_group
        mock_inventory.clear_pattern_cache.return_value = None
        mock_inventory.get_host_vars.return_value = {}
        mock_inventory.hosts.get.return_value = mock_host

        mock_var_mgr = MagicMock()
        mock_var_mgr.set_host_variable.return_value = None
        mock_var_mgr.set_host_facts.return_value = None
        mock_var_mgr.get_vars.return_value = dict()

        strategy_base = StrategyBase(tqm=mock_tqm)
        strategy_base._inventory = mock_inventory
        strategy_base._variable_manager = mock_var_mgr
        strategy_base._blocked_hosts = dict()

        def _has_dead_workers():
            return False

        strategy_base._tqm.has_dead_workers.side_effect = _has_dead_workers
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 0)

        task_result = TaskResult(host=mock_host.name,
                                 task=mock_task._uuid,
                                 return_data=dict(changed=True))
        queue_items.append(task_result)
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1

        def mock_queued_task_cache():
            return {
                (mock_host.name, mock_task._uuid): {
                    'task': mock_task,
                    'host': mock_host,
                    'task_vars': {},
                    'play_context': {},
                }
            }

        strategy_base._queued_task_cache = mock_queued_task_cache()
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

        task_result = TaskResult(host=mock_host.name,
                                 task=mock_task._uuid,
                                 return_data='{"failed":true}')
        queue_items.append(task_result)
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        mock_iterator.is_failed.return_value = True
        strategy_base._queued_task_cache = mock_queued_task_cache()
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        # self.assertIn('test01', mock_tqm._failed_hosts)
        # del mock_tqm._failed_hosts['test01']
        mock_iterator.is_failed.return_value = False

        task_result = TaskResult(host=mock_host.name,
                                 task=mock_task._uuid,
                                 return_data='{"unreachable": true}')
        queue_items.append(task_result)
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        strategy_base._queued_task_cache = mock_queued_task_cache()
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test01', mock_tqm._unreachable_hosts)
        del mock_tqm._unreachable_hosts['test01']

        task_result = TaskResult(host=mock_host.name,
                                 task=mock_task._uuid,
                                 return_data='{"skipped": true}')
        queue_items.append(task_result)
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        strategy_base._queued_task_cache = mock_queued_task_cache()
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

        queue_items.append(
            TaskResult(
                host=mock_host.name,
                task=mock_task._uuid,
                return_data=dict(
                    add_host=dict(host_name='newhost01', new_groups=['foo']))))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        strategy_base._queued_task_cache = mock_queued_task_cache()
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

        queue_items.append(
            TaskResult(host=mock_host.name,
                       task=mock_task._uuid,
                       return_data=dict(add_group=dict(group_name='foo'))))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        strategy_base._queued_task_cache = mock_queued_task_cache()
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

        queue_items.append(
            TaskResult(host=mock_host.name,
                       task=mock_task._uuid,
                       return_data=dict(changed=True,
                                        _ansible_notify=['test handler'])))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        strategy_base._queued_task_cache = mock_queued_task_cache()
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertTrue(mock_handler_task.is_host_notified(mock_host))

        # queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
        # results = strategy_base._process_pending_results(iterator=mock_iterator)
        # self.assertEqual(len(results), 0)
        # self.assertEqual(strategy_base._pending_results, 1)

        # queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
        # results = strategy_base._process_pending_results(iterator=mock_iterator)
        # self.assertEqual(len(results), 0)
        # self.assertEqual(strategy_base._pending_results, 1)

        # queue_items.append(('bad'))
        # self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)
        strategy_base.cleanup()
Example #15
0
    def send_task_result(self, *args, **kwargs):
        if isinstance(args[0], TaskResult):
            tr = args[0]
        else:
            tr = TaskResult(*args, **kwargs)
        self.put(tr, block=False)

    def test_strategy_base_run_handlers(self, mock_worker):
        def fake_run(*args):
            return

        mock_worker.side_effect = fake_run
        mock_play_context = MagicMock()

        mock_handler_task = Handler()
        mock_handler_task.action = 'foo'
        mock_handler_task.cached_name = False
        mock_handler_task.name = "test handler"
        mock_handler_task.listen = []
        mock_handler_task._role = None
        mock_handler_task._parent = None
        mock_handler_task._uuid = 'xxxxxxxxxxxxxxxx'

        mock_handler = MagicMock()
        mock_handler.block = [mock_handler_task]
        mock_handler.flag_for_host.return_value = False

        mock_play = MagicMock()
        mock_play.handlers = [mock_handler]

        mock_host = MagicMock(Host)
        mock_host.name = "test01"
        mock_host.has_hostkey = True

        mock_inventory = MagicMock()
        mock_inventory.get_hosts.return_value = [mock_host]
        mock_inventory.get.return_value = mock_host
        mock_inventory.get_host.return_value = mock_host

        mock_var_mgr = MagicMock()
        mock_var_mgr.get_vars.return_value = dict()

        mock_iterator = MagicMock()
        mock_iterator._play = mock_play

        fake_loader = DictDataLoader()

        tqm = TaskQueueManager(
            inventory=mock_inventory,
            variable_manager=mock_var_mgr,
            loader=fake_loader,
            passwords=None,
            forks=5,
        )
        tqm._initialize_processes(3)
        tqm.hostvars = dict()

        try:
            strategy_base = StrategyBase(tqm=tqm)

            strategy_base._inventory = mock_inventory

            task_result = TaskResult(mock_host.name, mock_handler_task._uuid,
                                     dict(changed=False))
            strategy_base._queued_task_cache = dict()
            strategy_base._queued_task_cache[(mock_host.name, mock_handler_task._uuid)] = {
                'task': mock_handler_task,
                'host': mock_host,
                'task_vars': {},
                'play_context': mock_play_context,
            }
            tqm._final_q.put(task_result)

            result = strategy_base.run_handlers(iterator=mock_iterator,
                                                play_context=mock_play_context)
        finally:
            strategy_base.cleanup()
            tqm.cleanup()
Example #17
0
    def _execute_meta(self, task, play_context, iterator, target_host=None):

        # meta tasks store their args in the _raw_params field of args,
        # since they do not use k=v pairs, so get that
        meta_action = task.args.get('_raw_params')

        # FIXME(s):
        # * raise an error or show a warning when a conditional is used
        #   on a meta task that doesn't support them

        def _evaluate_conditional(h):
            all_vars = self._variable_manager.get_vars(loader=self._loader,
                                                       play=iterator._play,
                                                       host=h,
                                                       task=task)
            templar = Templar(loader=self._loader, variables=all_vars)
            return task.evaluate_conditional(templar, all_vars)

        if target_host:
            host_list = [target_host]
        else:
            host_list = [
                host
                for host in self._inventory.get_hosts(iterator._play.hosts)
                if host.name not in self._tqm._unreachable_hosts
            ]

        results = []
        for host in host_list:
            result = None
            if meta_action == 'noop':
                # FIXME: issue a callback for the noop here?
                result = TaskResult(host, task, dict(changed=False,
                                                     msg="noop"))
            elif meta_action == 'flush_handlers':
                self.run_handlers(iterator, play_context)
            elif meta_action == 'refresh_inventory':
                self._inventory.refresh_inventory()
                result = TaskResult(
                    host, task,
                    dict(changed=False,
                         msg="inventory successfully refreshed"))
            elif meta_action == 'clear_facts':
                if _evaluate_conditional(host):
                    self._variable_manager.clear_facts(host)
                    result = TaskResult(
                        host, task,
                        dict(changed=True,
                             msg="inventory successfully refreshed"))
                else:
                    result = TaskResult(host, task,
                                        dict(changed=False, skipped=True))
            elif meta_action == 'clear_host_errors':
                if _evaluate_conditional(host):
                    self._tqm._failed_hosts.pop(host.name, False)
                    self._tqm._unreachable_hosts.pop(host.name, False)
                    iterator._host_states[
                        host.name].fail_state = iterator.FAILED_NONE
                    result = TaskResult(
                        host, task,
                        dict(changed=True,
                             msg="successfully cleared host errors"))
                else:
                    result = TaskResult(host, task,
                                        dict(changed=False, skipped=True))
            elif meta_action == 'end_play':
                if _evaluate_conditional(host):
                    iterator._host_states[
                        host.name].run_state = iterator.ITERATING_COMPLETE
                    result = TaskResult(host, task,
                                        dict(changed=True, msg="ending play"))
                else:
                    result = TaskResult(host, task,
                                        dict(changed=False, skipped=True))
            #elif meta_action == 'reset_connection':
            #    connection_info.connection.close()
            else:
                raise AnsibleError("invalid meta action requested: %s" %
                                   meta_action,
                                   obj=task._ds)

            if result is not None:
                results.append(result)

        return results
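
The per-host dispatch above can be reduced to a small standalone sketch that returns plain dicts instead of TaskResult objects. The names `run_meta` and `conditional_true` are illustrative placeholders, and only two of the meta actions are shown:

def run_meta(meta_action, hosts, conditional_true=lambda host: True):
    """Sketch of the per-host meta-action dispatch shown above, using plain dicts."""
    results = []
    for host in hosts:
        if meta_action == 'noop':
            results.append({'host': host, 'changed': False, 'msg': 'noop'})
        elif meta_action == 'clear_host_errors':
            if conditional_true(host):
                results.append({'host': host, 'changed': True,
                                'msg': 'successfully cleared host errors'})
            else:
                results.append({'host': host, 'changed': False, 'skipped': True})
        else:
            raise ValueError('invalid meta action requested: %s' % meta_action)
    return results

# Two hosts, one of which the conditional skips.
print(run_meta('clear_host_errors', ['web01', 'web02'],
               conditional_true=lambda h: h == 'web01'))
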
Example #18
0
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        #task_vars = self._job_vars.copy()
        task_vars = self._job_vars

        loop_var = 'item'
        label = None
        loop_pause = 0
        if self._task.loop_control:
            # the value may be 'None', so we still need to default it back to 'item'
            loop_var = self._task.loop_control.loop_var or 'item'
            label = self._task.loop_control.label or ('{{' + loop_var + '}}')
            loop_pause = self._task.loop_control.pause or 0

        if loop_var in task_vars:
            display.warning(
                "The loop variable '%s' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions and unexpected behavior."
                % loop_var)

        ran_once = False
        items = self._squash_items(items, loop_var, task_vars)
        for item in items:
            task_vars[loop_var] = item

            # pause between loop iterations
            if loop_pause and ran_once:
                time.sleep(loop_pause)
            else:
                ran_once = True

            try:
                tmp_task = self._task.copy(exclude_parent=True,
                                           exclude_tasks=True)
                tmp_task._parent = self._task._parent
                tmp_play_context = self._play_context.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=to_unicode(e)))
                continue

            # now we swap the internal task and play context with their copies,
            # execute, and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context,
                                                      self._play_context)
            res = self._execute(variables=task_vars)
            (self._task, tmp_task) = (tmp_task, self._task)
            (self._play_context, tmp_play_context) = (tmp_play_context,
                                                      self._play_context)

            # now update the result with the item info, and append the result
            # to the list of results
            res[loop_var] = item
            res['_ansible_item_result'] = True

            if label is not None:
                templar = Templar(loader=self._loader,
                                  shared_loader_obj=self._shared_loader_obj,
                                  variables=self._job_vars)
                res['_ansible_item_label'] = templar.template(
                    label, fail_on_undefined=False)

            self._rslt_q.put(TaskResult(self._host.name, self._task._uuid,
                                        res),
                             block=False)
            results.append(res)
            del task_vars[loop_var]

        return results
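
The `_ansible_item_label` handling above renders the `label` expression against the current loop item. A minimal sketch of the same idea, using plain jinja2 rather than Ansible's Templar (whose loader wiring is omitted here), could look like this:

from jinja2 import Template

loop_var = 'item'
label = '{{ ' + loop_var + '.name }}'   # same shape as the '{{item}}' default, keyed by a field

for item in [{'name': 'alpha'}, {'name': 'beta'}]:
    # Render the label with the loop variable bound to the current item.
    print(Template(label).render({loop_var: item}))   # -> alpha, then beta
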
Example #19
0
    def _load_included_file(self, included_file, iterator, is_handler=False):
        '''
        Loads an included YAML file of tasks, applying the optional set of variables.
        '''

        try:
            data = self._loader.load_from_file(included_file._filename)
            if data is None:
                return []
            elif not isinstance(data, list):
                raise AnsibleError("included task files must contain a list of tasks")

            block_list = load_list_of_blocks(
                data,
                play=included_file._task._block._play,
                parent_block=included_file._task._block,
                task_include=included_file._task,
                role=included_file._task._role,
                use_handlers=is_handler,
                loader=self._loader
            )

            # since we skip incrementing the stats when the task result is
            # first processed, we do so now for each host in the list
            for host in included_file._hosts:
                self._tqm._stats.increment('ok', host.name)

        except AnsibleError as e:
            # mark all of the hosts including this file as failed, send callbacks,
            # and increment the stats for this host
            for host in included_file._hosts:
                tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e)))
                iterator.mark_host_failed(host)
                self._tqm._failed_hosts[host.name] = True
                self._tqm._stats.increment('failures', host.name)
                self._tqm.send_callback('v2_runner_on_failed', tr)
            return []

        # set the vars for this task from those specified as params to the include
        for b in block_list:
            # first make a copy of the including task, so that each has a unique copy to modify
            # FIXME: not sure if this is the best way to fix this, as we might be losing
            #        information in the copy. Previously we assigned the include params to
            #        the block variables directly, which caused other problems, so we may
            #        need to figure out a third option if this also presents problems.
            b._task_include = b._task_include.copy(exclude_block=True)
            # then we create a temporary set of vars to ensure the variable reference is unique
            temp_vars = b._task_include.vars.copy()
            temp_vars.update(included_file._args.copy())
            # pop tags out of the include args, if they were specified there, and assign
            # them to the include. If the include already had tags specified, we raise an
            # error so that users know not to specify them both ways
            tags = temp_vars.pop('tags', [])
            if isinstance(tags, string_types):
                tags = [ tags ]
            if len(tags) > 0:
                if len(b._task_include.tags) > 0:
                    raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task)", obj=included_file._task._ds)
                self._display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                b._task_include.tags = tags
            b._task_include.vars = temp_vars

        # finally, send the callback and return the list of blocks loaded
        self._tqm.send_callback('v2_playbook_on_include', included_file)
        return block_list
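
The tag handling in the loop above (pop `tags` from the include vars, normalize a bare string to a list, and refuse to mix both specification styles) can be shown in isolation. This is a sketch with a hypothetical `merge_include_tags` helper, not the Ansible API:

def merge_include_tags(include_vars, existing_tags):
    """Sketch of the tag-merging rules applied to each block above."""
    tags = include_vars.pop('tags', [])
    if isinstance(tags, str):
        tags = [tags]
    if tags:
        if existing_tags:
            raise ValueError("Include tasks should not specify tags in more than one way")
        return tags
    return existing_tags

print(merge_include_tags({'tags': 'setup', 'pkg': 'nginx'}, []))   # ['setup']
print(merge_include_tags({'pkg': 'nginx'}, ['deploy']))            # ['deploy']
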
Example #20
0
    def _load_included_file(self, included_file, iterator, is_handler=False):
        '''
        Loads an included YAML file of tasks, applying the optional set of variables.
        '''

        display.debug("loading included file: %s" % included_file._filename)
        try:
            data = self._loader.load_from_file(included_file._filename)
            if data is None:
                return []
            elif not isinstance(data, list):
                raise AnsibleError("included task files must contain a list of tasks")

            ti_copy = included_file._task.copy()
            temp_vars = ti_copy.vars.copy()
            temp_vars.update(included_file._args)
            # pop tags out of the include args, if they were specified there, and assign
            # them to the include. If the include already had tags specified, we raise an
            # error so that users know not to specify them both ways
            tags = included_file._task.vars.pop('tags', [])
            if isinstance(tags, string_types):
                tags = tags.split(',')
            if len(tags) > 0:
                if len(included_file._task.tags) > 0:
                    raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                                             "Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
                                             obj=included_file._task._ds)
                display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                included_file._task.tags = tags

            ti_copy.vars = temp_vars

            block_list = load_list_of_blocks(
                data,
                play=iterator._play,
                parent_block=None,
                task_include=ti_copy,
                role=included_file._task._role,
                use_handlers=is_handler,
                loader=self._loader,
                variable_manager=self._variable_manager,
            )

            # since we skip incrementing the stats when the task result is
            # first processed, we do so now for each host in the list
            for host in included_file._hosts:
                self._tqm._stats.increment('ok', host.name)

        except AnsibleError as e:
            # mark all of the hosts including this file as failed, send callbacks,
            # and increment the stats for this host
            for host in included_file._hosts:
                tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_text(e)))
                iterator.mark_host_failed(host)
                self._tqm._failed_hosts[host.name] = True
                self._tqm._stats.increment('failures', host.name)
                self._tqm.send_callback('v2_runner_on_failed', tr)
            return []

        # finally, send the callback and return the list of blocks loaded
        self._tqm.send_callback('v2_playbook_on_include', included_file)
        display.debug("done processing included file")
        return block_list
    def test_strategy_base_process_pending_results(self):
        mock_tqm = MagicMock()
        mock_tqm._terminated = False
        mock_tqm._failed_hosts = dict()
        mock_tqm._unreachable_hosts = dict()
        mock_tqm.send_callback.return_value = None

        queue_items = []

        def _queue_empty(*args, **kwargs):
            return len(queue_items) == 0

        def _queue_get(*args, **kwargs):
            if len(queue_items) == 0:
                raise Queue.Empty
            else:
                return queue_items.pop()

        mock_queue = MagicMock()
        mock_queue.empty.side_effect = _queue_empty
        mock_queue.get.side_effect = _queue_get
        mock_tqm._final_q = mock_queue

        mock_tqm._stats = MagicMock()
        mock_tqm._stats.increment.return_value = None

        mock_iterator = MagicMock()
        mock_iterator.mark_host_failed.return_value = None
        mock_iterator.get_next_task_for_host.return_value = (None, None)

        mock_host = MagicMock()
        mock_host.name = 'test01'
        mock_host.vars = dict()
        mock_host.has_hostkey = True

        mock_task = MagicMock()
        mock_task._role = None
        mock_task.ignore_errors = False

        mock_group = MagicMock()
        mock_group.add_host.return_value = None

        def _get_host(host_name):
            if host_name == 'test01':
                return mock_host
            return None

        def _get_group(group_name):
            if group_name in ('all', 'foo'):
                return mock_group
            return None

        mock_inventory = MagicMock()
        mock_inventory._hosts_cache = dict()
        mock_inventory.get_host.side_effect = _get_host
        mock_inventory.get_group.side_effect = _get_group
        mock_inventory.clear_pattern_cache.return_value = None
        mock_inventory.get_host_vars.return_value = {}

        mock_var_mgr = MagicMock()
        mock_var_mgr.set_host_variable.return_value = None
        mock_var_mgr.set_host_facts.return_value = None

        strategy_base = StrategyBase(tqm=mock_tqm)
        strategy_base._inventory = mock_inventory
        strategy_base._variable_manager = mock_var_mgr
        strategy_base._blocked_hosts = dict()
        strategy_base._notified_handlers = dict()

        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 0)

        task_result = TaskResult(host=mock_host,
                                 task=mock_task,
                                 return_data=dict(changed=True))
        queue_items.append(('host_task_ok', task_result))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

        task_result = TaskResult(host=mock_host,
                                 task=mock_task,
                                 return_data='{"failed":true}')
        queue_items.append(('host_task_failed', task_result))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._process_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test01', mock_tqm._failed_hosts)
        del mock_tqm._failed_hosts['test01']

        task_result = TaskResult(host=mock_host,
                                 task=mock_task,
                                 return_data='{}')
        queue_items.append(('host_unreachable', task_result))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test01', mock_tqm._unreachable_hosts)
        del mock_tqm._unreachable_hosts['test01']

        task_result = TaskResult(host=mock_host,
                                 task=mock_task,
                                 return_data='{}')
        queue_items.append(('host_task_skipped', task_result))
        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1
        results = strategy_base._wait_on_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0], task_result)
        self.assertEqual(strategy_base._pending_results, 0)
        self.assertNotIn('test01', strategy_base._blocked_hosts)

        strategy_base._blocked_hosts['test01'] = True
        strategy_base._pending_results = 1

        queue_items.append(
            ('add_host',
             dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
        results = strategy_base._process_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)
        self.assertIn('test01', strategy_base._blocked_hosts)

        queue_items.append(
            ('add_group', mock_host, dict(add_group=dict(group_name='foo'))))
        results = strategy_base._process_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)
        self.assertIn('test01', strategy_base._blocked_hosts)

        task_result = TaskResult(host=mock_host,
                                 task=mock_task,
                                 return_data=dict(changed=True))
        queue_items.append(('notify_handler', task_result, 'test handler'))
        results = strategy_base._process_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)
        self.assertIn('test01', strategy_base._blocked_hosts)
        self.assertIn('test handler', strategy_base._notified_handlers)
        self.assertIn(mock_host,
                      strategy_base._notified_handlers['test handler'])

        queue_items.append(
            ('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
        results = strategy_base._process_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)

        queue_items.append(
            ('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
        results = strategy_base._process_pending_results(
            iterator=mock_iterator)
        self.assertEqual(len(results), 0)
        self.assertEqual(strategy_base._pending_results, 1)

        queue_items.append(('bad'))
        self.assertRaises(AnsibleError,
                          strategy_base._process_pending_results,
                          iterator=mock_iterator)
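
The test above drives an older, tuple-based result protocol in which each queue item is a tuple whose first element names the message kind ('host_task_ok', 'add_host', 'notify_handler', and so on). A minimal sketch of draining such a queue against a handler table, using the standard-library queue module and placeholder handlers, might be:

import queue

def drain(final_q, handlers):
    """Pop tuple messages until the queue is empty and dispatch on the first element."""
    processed = []
    while True:
        try:
            msg = final_q.get(block=False)
        except queue.Empty:
            break
        kind, payload = msg[0], msg[1:]
        handlers.get(kind, lambda *args: None)(*payload)
        processed.append(kind)
    return processed

q = queue.Queue()
q.put(('host_task_ok', {'changed': True}))
q.put(('notify_handler', {'changed': True}, 'test handler'))
print(drain(q, {'host_task_ok': lambda res: None,
                'notify_handler': lambda res, name: None}))   # ['host_task_ok', 'notify_handler']
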
Example #22
0
    def _run(self):
        '''
        Called when the process is started.  Pushes the result onto the
        results queue. We also remove the host from the blocked hosts list, to
        signify that they are ready for their next task.
        '''

        # import cProfile, pstats, StringIO
        # pr = cProfile.Profile()
        # pr.enable()

        if HAS_PYCRYPTO_ATFORK:
            atfork()

        try:
            # execute the task and build a TaskResult from the result
            display.debug("running TaskExecutor() for %s/%s" %
                          (self._host, self._task))
            executor_result = TaskExecutor(self._host, self._task,
                                           self._task_vars, self._play_context,
                                           self._new_stdin, self._loader,
                                           self._shared_loader_obj,
                                           self._final_q).run()

            display.debug("done running TaskExecutor() for %s/%s [%s]" %
                          (self._host, self._task, self._task._uuid))
            self._host.vars = dict()
            self._host.groups = []
            task_result = TaskResult(
                self._host.name,
                self._task._uuid,
                executor_result,
                task_fields=self._task.dump_attrs(),
            )

            # put the result on the result queue
            display.debug("sending task result for task %s" % self._task._uuid)
            self._final_q.put(task_result)
            display.debug("done sending task result for task %s" %
                          self._task._uuid)

        except AnsibleConnectionFailure:
            self._host.vars = dict()
            self._host.groups = []
            task_result = TaskResult(
                self._host.name,
                self._task._uuid,
                dict(unreachable=True),
                task_fields=self._task.dump_attrs(),
            )
            self._final_q.put(task_result, block=False)

        except Exception as e:
            if not isinstance(e,
                              (IOError, EOFError, KeyboardInterrupt,
                               SystemExit)) or isinstance(e, TemplateNotFound):
                try:
                    self._host.vars = dict()
                    self._host.groups = []
                    task_result = TaskResult(
                        self._host.name,
                        self._task._uuid,
                        dict(failed=True,
                             exception=to_text(traceback.format_exc()),
                             stdout=''),
                        task_fields=self._task.dump_attrs(),
                    )
                    self._final_q.put(task_result, block=False)
                except Exception:
                    display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
                    display.debug(u"WORKER TRACEBACK: %s" %
                                  to_text(traceback.format_exc()))

        display.debug("WORKER PROCESS EXITING")
Example #23
0
    def run(self):
        '''
        Called when the process is started.  Pushes the result onto the
        results queue. We also remove the host from the blocked hosts list, to
        signify that they are ready for their next task.
        '''

        #import cProfile, pstats, StringIO
        #pr = cProfile.Profile()
        #pr.enable()

        if HAS_ATFORK:
            atfork()

        try:
            # execute the task and build a TaskResult from the result
            display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
            executor_result = TaskExecutor(
                self._host,
                self._task,
                self._task_vars,
                self._play_context,
                self._new_stdin,
                self._loader,
                self._shared_loader_obj,
                self._rslt_q
            ).run()

            display.debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
            self._host.vars = dict()
            self._host.groups = []
            task_result = TaskResult(self._host.name, self._task._uuid, executor_result)

            # put the result on the result queue
            display.debug("sending task result")
            self._rslt_q.put(task_result)
            display.debug("done sending task result")

        except AnsibleConnectionFailure:
            self._host.vars = dict()
            self._host.groups = []
            task_result = TaskResult(self._host.name, self._task._uuid, dict(unreachable=True))
            self._rslt_q.put(task_result, block=False)

        except Exception as e:
            if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound):
                try:
                    self._host.vars = dict()
                    self._host.groups = []
                    task_result = TaskResult(self._host.name, self._task._uuid, dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''))
                    self._rslt_q.put(task_result, block=False)
                except Exception:
                    display.debug(u"WORKER EXCEPTION: %s" % to_text(e))
                    display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc()))

        display.debug("WORKER PROCESS EXITING")

        #pr.disable()
        #s = StringIO.StringIO()
        #sortby = 'time'
        #ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        #ps.print_stats()
        #with open('worker_%06d.stats' % os.getpid(), 'w') as f:
        #    f.write(s.getvalue())

        sys.exit(0)
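
The commented-out cProfile and pstats lines above hint at how the worker was profiled. A self-contained, standard-library-only version of that pattern, with a stand-in workload in place of the real task execution, is:

import cProfile
import io
import pstats

pr = cProfile.Profile()
pr.enable()

sum(i * i for i in range(100000))   # stand-in for the worker's actual task execution

pr.disable()
s = io.StringIO()
pstats.Stats(pr, stream=s).sort_stats('time').print_stats(10)
print(s.getvalue())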