Example #1
    def get_next_task_for_host(self, host, peek=False):

        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            return (None, None)
        elif s.run_state == self.ITERATING_SETUP:
            s.run_state = self.ITERATING_TASKS
            s.pending_setup = True
            if (self._play.gather_facts == 'smart' and not host._gathered_facts) or boolean(self._play.gather_facts):
                if not peek:
                    # mark the host as having gathered facts
                    host.set_gathered_facts(True)

                task = Task()
                task.action = 'setup'
                task.args   = {}
                task.set_loader(self._play._loader)
            else:
                s.pending_setup = False

        if not task:
            (s, task) = self._get_next_task_from_state(s, peek=peek)

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
                s.cur_role._completed = True
            s.cur_role = task._role

        if not peek:
            self._host_states[host.name] = s

        return (s, task)
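
A minimal usage sketch (not part of the source): a caller drains the iterator one host at a time until the terminal state is reached. Here `iterator`, `host`, and `execute` are hypothetical stand-ins for a PlayIterator instance, an inventory host, and a task executor.

    while True:
        (state, task) = iterator.get_next_task_for_host(host)
        if task is None:
            # state.run_state has reached ITERATING_COMPLETE
            break
        execute(host, task)
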
Example #2
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.tags   = ['always']
        setup_task.args   = {}
        setup_task.set_loader(self._play._loader)
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self._blocks.append(new_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if the host's name is in the variable manager's fact cache, then set
            # its _gathered_facts flag to true for smart gathering tests later
            if host.name in variable_manager._fact_cache:
                host._gathered_facts = True
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None

        # Extend the play handlers list to include the handlers defined in roles
        self._play.handlers.extend(play.compile_roles_handlers())
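
The start_at_task matching above can be read as a small predicate: a task matches when either its raw name or its fully qualified name equals the pattern, or matches it as a shell-style glob. A self-contained sketch of that check:

    import fnmatch

    def matches_start_at(name, qualified_name, pattern):
        return (name == pattern or fnmatch.fnmatch(name, pattern) or
                qualified_name == pattern or fnmatch.fnmatch(qualified_name, pattern))

    matches_start_at('install nginx', 'web : install nginx', 'install *')  # True
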
Example #3
    def get_next_task_for_host(self, host, peek=False):

        display.debug("getting the next task for host %s" % host.name)
        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            display.debug("host %s is done iterating, returning" % host.name)
            return (None, None)
        elif s.run_state == self.ITERATING_SETUP:
            s.run_state = self.ITERATING_TASKS
            s.pending_setup = True

            # Gather facts if the default is 'smart' and we have not yet
            # done it for this host; or if 'explicit' and the play sets
            # gather_facts to True; or if 'implicit' and the play does
            # NOT explicitly set gather_facts to False.

            gathering = C.DEFAULT_GATHERING
            implied = self._play.gather_facts is None or boolean(self._play.gather_facts)

            if (gathering == 'implicit' and implied) or \
               (gathering == 'explicit' and boolean(self._play.gather_facts)) or \
               (gathering == 'smart' and implied and not host._gathered_facts):
                if not peek:
                    # mark the host as having gathered facts
                    host.set_gathered_facts(True)

                task = Task()
                task.action = 'setup'
                task.args   = {}
                task.set_loader(self._play._loader)
            else:
                s.pending_setup = False

        if not task:
            (s, task) = self._get_next_task_from_state(s, peek=peek)

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and task._role != s.cur_role and host.name in s.cur_role._had_task_run and not peek:
                s.cur_role._completed[host.name] = True
            s.cur_role = task._role

        if not peek:
            self._host_states[host.name] = s

        display.debug("done getting next task for host %s" % host.name)
        display.debug(" ^ task is: %s" % task)
        display.debug(" ^ state is: %s" % s)
        return (s, task)
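
The gating comment in this version condenses to a pure function. A hedged restatement, with a stand-in for the boolean() truthy-string helper the surrounding module imports; `gather_facts` is the play's raw setting (None when unset):

    def boolean(value):
        # stand-in for the module's truthy-string helper
        return str(value).strip().lower() in ('yes', 'on', 'true', '1')

    def should_gather(gathering, gather_facts, already_gathered):
        implied = gather_facts is None or boolean(gather_facts)
        return ((gathering == 'implicit' and implied) or
                (gathering == 'explicit' and boolean(gather_facts)) or
                (gathering == 'smart' and implied and not already_gathered))

    should_gather('smart', None, already_gathered=False)  # True
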
Example #4
File: linear.py  Project: imace/ansible
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                self._display.debug(
                    "getting the remaining hosts for this loop")
                hosts_left = self._inventory.get_hosts(iterator._play.hosts)
                self._display.debug(
                    "done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest = False
                choose_step = True

                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action,
                                                   class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP',
                                                    False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or (task._role._metadata and not task._role._metadata.allow_duplicates):
                            self._display.debug(
                                "'%s' skipped because role has already run" %
                                task)
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        self._display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        task_vars = self.add_tqm_variables(task_vars,
                                                           play=iterator._play)
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        self._display.debug("done getting variables")

                        if not callback_sent:
                            temp_task = task.copy()
                            try:
                                temp_task.name = unicode(
                                    templar.template(temp_task.name,
                                                     fail_on_undefined=False))
                            except Exception:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                pass
                            self._tqm.send_callback(
                                'v2_playbook_on_task_start',
                                temp_task,
                                is_conditional=False)
                            callback_sent = True

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                # go to next host/task group
                if skip_rest:
                    continue

                self._display.debug(
                    "done queuing things up, now waiting for results queue to drain"
                )
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    self._display.debug("out of hosts to run on")
                    self._tqm.send_callback(
                        'v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        loader=self._loader,
                        variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(
                                included_file, iterator=iterator)
                        except AnsibleError as e:
                            for host in included_file._hosts:
                                iterator.mark_host_failed(host)
                            self._display.warning(str(e))
                            continue

                        for new_block in new_blocks:
                            noop_block = Block(parent_block=task._block)
                            noop_block.block = [
                                noop_task for t in new_block.block
                            ]
                            noop_block.always = [
                                noop_task for t in new_block.always
                            ]
                            noop_block.rescue = [
                                noop_task for t in new_block.rescue
                            ]
                            for host in hosts_left:
                                if host in included_file._hosts:
                                    task_vars = self._variable_manager.get_vars(
                                        loader=self._loader,
                                        play=iterator._play,
                                        host=host,
                                        task=included_file._task)
                                    final_block = new_block.filter_tagged_tasks(
                                        play_context, task_vars)
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                self._display.debug("results queue empty")
Example #5
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []
        self._variable_manager = variable_manager

        self._task_uuid_cache = dict()

        # Default options to gather
        gather_subset = play_context.gather_subset
        gather_timeout = play_context.gather_timeout
        fact_path = play_context.fact_path

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset
        # Retrieve timeout for gather
        if self._play.gather_timeout is not None:
            gather_timeout = self._play.gather_timeout
        # Retrieve fact_path
        if self._play.fact_path is not None:
            fact_path = self._play.fact_path

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.name = 'Gathering Facts'
        setup_task.tags = ['always']
        setup_task.args = {
            'gather_subset': gather_subset,
        }
        if gather_timeout:
            setup_task.args['gather_timeout'] = gather_timeout
        if fact_path:
            setup_task.args['fact_path'] = fact_path
        setup_task.set_loader(self._play._loader)
        # short circuit fact gathering if the entire playbook is conditional
        if self._play._included_conditional is not None:
            setup_task.when = self._play._included_conditional[:]
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)
        self.cache_block_tasks(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self.cache_block_tasks(new_block)
                self._blocks.append(new_block)

        for handler_block in self._play.handlers:
            self.cache_block_tasks(handler_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None
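
The setting resolution at the top of this constructor is a simple two-level precedence: a play-level value, when present, overrides the play_context default. A one-line helper capturing that rule (illustrative only, not part of the source):

    def resolve(play_value, context_default):
        return play_value if play_value is not None else context_default

    # e.g. gather_subset = resolve(play.gather_subset, play_context.gather_subset)
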
Example #6
    def get_next_task_for_host(self, host, peek=False, lock_step=True):
        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            return (None, None)
        else:
            while True:
                try:
                    cur_block = s._blocks[s.cur_block]
                except IndexError:
                    s.run_state = self.ITERATING_COMPLETE
                    break

                if s.run_state == self.ITERATING_SETUP:
                    s.run_state = self.ITERATING_TASKS
                    if (self._play._gather_facts == 'smart' and not host.gathered_facts) or boolean(self._play._gather_facts):
                        # mark the host as having gathered facts
                        host.set_gathered_facts(True)

                        task = Task()
                        task.action = 'setup'
                        task.set_loader(self._play._loader)

                elif s.run_state == self.ITERATING_TASKS:
                    # clear the pending setup flag, since we're past that and it didn't fail
                    if s.pending_setup:
                        s.pending_setup = False

                    if s.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
                        s.run_state = self.ITERATING_RESCUE
                    elif s.cur_regular_task >= len(cur_block.block):
                        s.run_state = self.ITERATING_ALWAYS
                    else:
                        task = cur_block.block[s.cur_regular_task]
                        s.cur_regular_task += 1
                        break
                elif s.run_state == self.ITERATING_RESCUE:
                    if s.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
                        s.run_state = self.ITERATING_ALWAYS
                    elif s.cur_rescue_task >= len(cur_block.rescue):
                        if len(cur_block.rescue) > 0:
                            s.fail_state = self.FAILED_NONE
                        s.run_state = self.ITERATING_ALWAYS
                    else:
                        task = cur_block.rescue[s.cur_rescue_task]
                        s.cur_rescue_task += 1
                        break
                elif s.run_state == self.ITERATING_ALWAYS:
                    if s.cur_always_task >= len(cur_block.always):
                        if s.fail_state != self.FAILED_NONE:
                            s.run_state = self.ITERATING_COMPLETE
                            break
                        else:
                            s.cur_block += 1
                            s.cur_regular_task = 0
                            s.cur_rescue_task  = 0
                            s.cur_always_task  = 0
                            s.run_state = self.ITERATING_TASKS
                    else:
                        task = cur_block.always[s.cur_always_task]
                        s.cur_always_task += 1
                        break

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
                s.cur_role._completed = True

            s.cur_role = task._role

        if not peek:
            self._host_states[host.name] = s

        return (s, task)
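
The fail_state comparisons above are bitmask tests, so a host can carry several failure flags at once. A minimal sketch of the arithmetic, using illustrative flag values (the real constants are class attributes on the iterator):

    FAILED_NONE, FAILED_TASKS, FAILED_RESCUE = 0, 2, 4

    fail_state = FAILED_NONE
    fail_state |= FAILED_TASKS                          # the block's tasks failed
    assert fail_state & FAILED_TASKS == FAILED_TASKS    # -> enter ITERATING_RESCUE
    fail_state |= FAILED_RESCUE                         # the rescue failed too
    assert fail_state & FAILED_RESCUE == FAILED_RESCUE  # -> enter ITERATING_ALWAYS
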
Example #7
File: linear.py  Project: bennojoy/distrib
    def run(self, iterator, connection_info):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result     = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                debug("getting the remaining hosts for this loop")
                hosts_left = self._inventory.get_hosts(iterator._play.hosts)
                debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run():
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or (task._role._metadata and not task._role._metadata.allow_duplicates):
                            debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        # meta tasks store their args in the _raw_params field of args,
                        # since they do not use k=v pairs, so get that
                        meta_action = task.args.get('_raw_params')
                        if meta_action == 'noop':
                            # FIXME: issue a callback for the noop here?
                            continue
                        elif meta_action == 'flush_handlers':
                            self.run_handlers(iterator, connection_info)
                        else:
                            raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
                    else:
                        debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        debug("done getting variables")

                        if not callback_sent:
                            temp_task = task.copy()
                            temp_task.name = templar.template(temp_task.get_name(), fail_on_undefined=False)
                            self._tqm.send_callback('v2_playbook_on_task_start', temp_task, is_conditional=False)
                            callback_sent = True

                        self._blocked_hosts[host.get_name()] = True

                        # Code for distributed Ansible.
                        self._pending_results += 1
                        dtask = {}
                        (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
                        t_uuid              = task._uuid
                        task._uuid          = None
                        dtask['host']       = jsonpickle.encode(host)
                        dtask['task_vars']  = jsonpickle.encode(task_vars)
                        dtask['task']       = jsonpickle.encode(task)
                        dtask['conn_info']  = jsonpickle.encode(connection_info)
                        dtask['base_dir']   = self._loader.get_basedir()
#                       dtask['uuid']       = t_uuid 
                        final_task          = json.dumps(dtask)
                        remote_task         = TaskRpcClient()
                        task_response       = remote_task.put(final_task)

#                        self._queue_task(host, task, task_vars, connection_info)

                    task_result = json.loads(task_response)
                    rslt_q.put(jsonpickle.decode(task_result),block=False)
                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                debug("done queuing things up, now waiting for results queue to drain")
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    debug("out of hosts to run on")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)
                        except AnsibleError as e:
                            for host in included_file._hosts:
                                iterator.mark_host_failed(host)
                            # FIXME: callback here?
                            print(e)
                            continue

                        for new_block in new_blocks:
                            noop_block = Block(parent_block=task._block)
                            noop_block.block  = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]
                            for host in hosts_left:
                                if host in included_file._hosts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                    final_block = new_block.filter_tagged_tasks(connection_info, task_vars)
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                debug("results queue empty")
Example #8
    def run(self, iterator, connection_info):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        result = True

        # iterate over each task, while there is one left to run
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                debug("getting the remaining hosts for this loop")
                self._tqm._failed_hosts = iterator.get_failed_hosts()
                hosts_left = self.get_hosts_remaining(iterator._play)
                debug("done getting the remaining hosts for this loop")
                if len(hosts_left) == 0:
                    debug("out of hosts to run on")
                    self._callback.playbook_on_no_hosts_remaining()
                    result = False
                    break

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action,
                                                   class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP',
                                                    False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    debug("getting variables")
                    task_vars = self._variable_manager.get_vars(
                        loader=self._loader,
                        play=iterator._play,
                        host=host,
                        task=task)
                    debug("done getting variables")

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run():
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or (task._role._metadata and not task._role._metadata.allow_duplicates):
                            debug("'%s' skipped because role has already run" %
                                  task)
                            continue

                    if not task.evaluate_tags(
                            connection_info.only_tags,
                            connection_info.skip_tags,
                            task_vars) and task.action != 'setup':
                        debug("'%s' failed tag evaluation" % task)
                        continue

                    if task.action == 'meta':
                        # meta tasks store their args in the _raw_params field of args,
                        # since they do not use k=v pairs, so get that
                        meta_action = task.args.get('_raw_params')
                        if meta_action == 'noop':
                            # FIXME: issue a callback for the noop here?
                            print("%s => NOOP" % host)
                            continue
                        elif meta_action == 'flush_handlers':
                            self.run_handlers(iterator, connection_info)
                        else:
                            raise AnsibleError(
                                "invalid meta action requested: %s" %
                                meta_action,
                                obj=task._ds)
                    else:
                        if not callback_sent:
                            self._callback.playbook_on_task_start(
                                task.get_name(), False)
                            callback_sent = True

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars,
                                         connection_info)

                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                debug(
                    "done queuing things up, now waiting for results queue to drain"
                )
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                # FIXME: this needs to be somewhere else
                class IncludedFile:
                    def __init__(self, filename, args, task):
                        self._filename = filename
                        self._args = args
                        self._task = task
                        self._hosts = []

                    def add_host(self, host):
                        if host not in self._hosts:
                            self._hosts.append(host)

                    def __eq__(self, other):
                        return other._filename == self._filename and other._args == self._args

                    def __repr__(self):
                        return "%s (%s): %s" % (self._filename, self._args,
                                                self._hosts)

                included_files = []
                for res in host_results:
                    if res._task.action == 'include':
                        if res._task.loop:
                            include_results = res._result['results']
                        else:
                            include_results = [res._result]

                        for include_result in include_results:
                            original_task = iterator.get_original_task(
                                res._host, res._task)
                            if original_task and original_task._role:
                                include_file = self._loader.path_dwim_relative(
                                    original_task._role._role_path, 'tasks',
                                    res._task.args.get('_raw_params'))
                            else:
                                include_file = self._loader.path_dwim(
                                    res._task.args.get('_raw_params'))

                            include_variables = include_result.get(
                                'include_variables', dict())
                            if 'item' in include_result:
                                include_variables['item'] = include_result[
                                    'item']

                            inc_file = IncludedFile(include_file,
                                                    include_variables,
                                                    original_task)

                            try:
                                pos = included_files.index(inc_file)
                                inc_file = included_files[pos]
                            except ValueError:
                                included_files.append(inc_file)

                            inc_file.add_host(res._host)

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_tasks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        new_tasks = self._load_included_file(included_file)
                        noop_tasks = [noop_task for t in new_tasks]
                        for host in hosts_left:
                            if host in included_file._hosts:
                                all_tasks[host].extend(new_tasks)
                            else:
                                all_tasks[host].extend(noop_tasks)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_tasks[host])

                debug("results queue empty")
            except (IOError, EOFError) as e:
                debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return 1
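
The inline IncludedFile class leans on __eq__ plus list.index() to deduplicate: equal include files collapse into a single entry that accumulates hosts. The pattern in isolation, with a stand-in class:

    class Inc(object):
        def __init__(self, filename):
            self._filename = filename
            self._hosts = []
        def __eq__(self, other):
            return other._filename == self._filename

    included = []
    inc = Inc('tasks/extra.yml')
    try:
        inc = included[included.index(inc)]   # reuse the existing equal entry
    except ValueError:
        included.append(inc)                  # first sighting of this file/args
    inc._hosts.append('web1')
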
Example #9
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result     = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                self._display.debug("getting the remaining hosts for this loop")
                hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                self._display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest   = False
                choose_step = True

                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or (task._role._metadata and not task._role._metadata.allow_duplicates):
                            self._display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        self._display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        self._display.debug("done getting variables")

                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = text_type(templar.template(task.name, fail_on_undefined=False))
                                display.debug("done templating")
                            except Exception:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                # go to next host/task group
                if skip_rest:
                    continue

                self._display.debug("done queuing things up, now waiting for results queue to drain")
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    self._display.debug("out of hosts to run on")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)
                        except AnsibleError as e:
                            for host in included_file._hosts:
                                iterator.mark_host_failed(host)
                            self._display.warning(str(e))
                            continue

                        for new_block in new_blocks:
                            noop_block = Block(parent_block=task._block)
                            noop_block.block  = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]
                            for host in hosts_left:
                                if host in included_file._hosts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                    final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                self._display.debug("results queue empty")
            except (IOError, EOFError) as e:
                self._display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return False

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
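
The run_once detection near the top of this method is worth isolating: a task collapses to a single host when the play sets run_once, or when the action plugin class advertises BYPASS_HOST_LOOP (as some inventory-modifying actions do); getattr keeps the lookup safe for plugins that do not define the attribute. A sketch with a dummy plugin class:

    class DummyBypassAction(object):
        BYPASS_HOST_LOOP = True

    def is_run_once(task_run_once, action_cls):
        return bool(task_run_once or getattr(action_cls, 'BYPASS_HOST_LOOP', False))

    assert is_run_once(False, DummyBypassAction)
    assert not is_run_once(False, object)
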
Example #10
File: linear.py  Project: dataxu/ansible
    def run(self, iterator, connection_info):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        result = True

        # iterate over each task, while there is one left to run
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                debug("getting the remaining hosts for this loop")
                self._tqm._failed_hosts = iterator.get_failed_hosts()
                hosts_left = self.get_hosts_remaining(iterator._play)
                debug("done getting the remaining hosts for this loop")
                if len(hosts_left) == 0:
                    debug("out of hosts to run on")
                    self._callback.playbook_on_no_hosts_remaining()
                    result = False
                    break

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    debug("getting variables")
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    debug("done getting variables")

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run():
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or (task._role._metadata and not task._role._metadata.allow_duplicates):
                            debug("'%s' skipped because role has already run" % task)
                            continue

                    if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup':
                        debug("'%s' failed tag evaluation" % task)
                        continue

                    if task.action == 'meta':
                        # meta tasks store their args in the _raw_params field of args,
                        # since they do not use k=v pairs, so get that
                        meta_action = task.args.get('_raw_params')
                        if meta_action == 'noop':
                            # FIXME: issue a callback for the noop here?
                            print("%s => NOOP" % host)
                            continue
                        elif meta_action == 'flush_handlers':
                            self.run_handlers(iterator, connection_info)
                        else:
                            raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
                    else:
                        if not callback_sent:
                            self._callback.playbook_on_task_start(task.get_name(), False)
                            callback_sent = True

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, connection_info)

                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                debug("done queuing things up, now waiting for results queue to drain")
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                # FIXME: this needs to be somewhere else
                class IncludedFile:
                    def __init__(self, filename, args, task):
                        self._filename = filename
                        self._args     = args
                        self._task     = task
                        self._hosts    = []
                    def add_host(self, host):
                        if host not in self._hosts:
                            self._hosts.append(host)
                    def __eq__(self, other):
                        return other._filename == self._filename and other._args == self._args
                    def __repr__(self):
                        return "%s (%s): %s" % (self._filename, self._args, self._hosts)

                included_files = []
                for res in host_results:
                    if res._task.action == 'include':
                        if res._task.loop:
                            include_results = res._result['results']
                        else:
                            include_results = [ res._result ]

                        for include_result in include_results:
                            original_task = iterator.get_original_task(res._host, res._task)
                            if original_task and original_task._role:
                                include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', res._task.args.get('_raw_params'))
                            else:
                                include_file = self._loader.path_dwim(res._task.args.get('_raw_params'))

                            include_variables = include_result.get('include_variables', dict())
                            if 'item' in include_result:
                                include_variables['item'] = include_result['item']

                            inc_file = IncludedFile(include_file, include_variables, original_task)

                            try:
                                pos = included_files.index(inc_file)
                                inc_file = included_files[pos]
                            except ValueError:
                                included_files.append(inc_file)

                            inc_file.add_host(res._host)

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_tasks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        new_tasks = self._load_included_file(included_file)
                        noop_tasks = [noop_task for t in new_tasks]
                        for host in hosts_left:
                            if host in included_file._hosts:
                                all_tasks[host].extend(new_tasks)
                            else:
                                all_tasks[host].extend(noop_tasks)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_tasks[host])

                debug("results queue empty")
            except (IOError, EOFError) as e:
                debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return 1
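
The core trick in the include handling above is worth isolating: hosts that matched the include get the real task list, and every other host gets an equal-length list of noops so all queues advance in lock step. A minimal standalone sketch of that padding, using plain strings as stand-ins for Ansible's Task objects:

# Minimal stand-ins, not Ansible's real classes: this shows only the
# equal-length padding that keeps host queues in lock step.
NOOP = 'noop'

def pad_with_noops(hosts_left, included_hosts, new_tasks):
    # included hosts receive the real task list; everyone else gets an
    # equal-length list of noop markers, so all queues stay the same length
    all_tasks = dict((host, []) for host in hosts_left)
    noop_tasks = [NOOP] * len(new_tasks)
    for host in hosts_left:
        if host in included_hosts:
            all_tasks[host].extend(new_tasks)
        else:
            all_tasks[host].extend(noop_tasks)
    return all_tasks

# usage: two of three hosts matched the include
print(pad_with_noops(['a', 'b', 'c'], {'a', 'b'}, ['t1', 't2']))
# {'a': ['t1', 't2'], 'b': ['t1', 't2'], 'c': ['noop', 'noop']}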
Example #11
    def _get_next_task_lockstep(self, hosts, iterator):
        """
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        """

        noop_task = Task()
        noop_task.action = "meta"
        noop_task.args["_raw_params"] = "noop"
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            if not iterator.is_failed(host):
                host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks = 0
        num_rescue = 0
        num_always = 0

        display.debug("counting tasks in each state of execution")
        host_tasks_to_run = [
            (host, state_task) for host, state_task in iteritems(host_tasks) if state_task and state_task[1]
        ]

        if host_tasks_to_run:
            lowest_cur_block = min(
                (s.cur_block for h, (s, t) in host_tasks_to_run if s.run_state != PlayIterator.ITERATING_COMPLETE)
            )
        else:
            # empty host_tasks_to_run will just run till the end of the function
            # without ever touching lowest_cur_block
            lowest_cur_block = None

        for (k, v) in host_tasks_to_run:
            (s, t) = v

            if s.cur_block > lowest_cur_block:
                # Not the current block, ignore it
                continue

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution")

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            """
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            """
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks.get(host.name)
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]
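
The dispatch at the end of _get_next_task_lockstep encodes a fixed priority: setup runs before tasks, tasks before rescue, rescue before always, and only hosts in the winning state advance while the rest receive noops. A minimal sketch of that priority rule, with plain strings standing in for the PlayIterator constants:

# Plain strings stand in for PlayIterator.ITERATING_*; only the
# priority rule is modeled here.
PRIORITY = ['setup', 'tasks', 'rescue', 'always']

def pick_state(host_states):
    # return the highest-priority state any host is currently in,
    # or None once every host is complete
    for state in PRIORITY:
        if state in host_states.values():
            return state
    return None

# usage: one host still gathering facts wins over hosts running tasks
print(pick_state({'h1': 'setup', 'h2': 'tasks'}))        # 'setup'
print(pick_state({'h1': 'complete', 'h2': 'complete'}))  # None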
Example #12
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []

        self._task_uuid_cache = dict()

        # Default options to gather
        gather_subset = C.DEFAULT_GATHER_SUBSET
        gather_timeout = C.DEFAULT_GATHER_TIMEOUT

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset
        # Retrieve timeout for gather
        if self._play.gather_timeout is not None:
            gather_timeout = self._play.gather_timeout

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = "setup"
        setup_task.name = "Gathering Facts"
        setup_task.tags = ["always"]
        setup_task.args = {"gather_subset": gather_subset}
        if gather_timeout:
            setup_task.args["gather_timeout"] = gather_timeout
        setup_task.set_loader(self._play._loader)
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)
        self.cache_block_tasks(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self.cache_block_tasks(new_block)
                self._blocks.append(new_block)

        for handler_block in self._play.handlers:
            self.cache_block_tasks(handler_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if the host's name is in the variable manager's fact cache, then set
            # its _gathered_facts flag to true for smart gathering tests later
            if host.name in variable_manager._fact_cache and variable_manager._fact_cache.get(host.name, {}).get("module_setup", False):
                host._gathered_facts = True
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if (
                        task.name == play_context.start_at_task
                        or fnmatch.fnmatch(task.name, play_context.start_at_task)
                        or task.get_name() == play_context.start_at_task
                        or fnmatch.fnmatch(task.get_name(), play_context.start_at_task)
                    ):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None
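
The start_at_task comparison above accepts either an exact task name or a shell-style glob, tested via fnmatch against both the bare and the fully qualified name. A quick, self-contained demonstration of that matching rule:

import fnmatch

def matches_start_at(task_name, start_at_task):
    # exact name match, or shell-style glob via fnmatch, mirroring the
    # checks performed against both task.name and task.get_name() above
    return task_name == start_at_task or fnmatch.fnmatch(task_name, start_at_task)

print(matches_start_at('install nginx', 'install *'))    # True
print(matches_start_at('install nginx', 'configure *'))  # False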
Example #13
File: bb_linear.py Project: zsais/ursula
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                display.debug("getting the remaining hosts for this loop")
                hosts_left = [
                    host for host in self._inventory.get_hosts(
                        iterator._play.hosts,
                        ignore_limits_and_restrictions=True)
                    if host.name not in self._tqm._unreachable_hosts
                    and not iterator.is_failed(host)
                ]
                display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest = False
                choose_step = True

                # flag set if task is set to any_errors_fatal
                any_errors_fatal = False

                results = []
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    if self._tqm._terminated:
                        break

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action,
                                                   class_only=True)
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        action = None

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            display.debug(
                                "'%s' skipped because role has already run" %
                                task)
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        display.debug("done getting variables")

                        run_once = templar.template(
                            task.run_once) or action and getattr(
                                action, 'BYPASS_HOST_LOOP', False)

                        if task.any_errors_fatal or run_once:
                            any_errors_fatal = True

                        if not callback_sent:
                            display.debug(
                                "sending task start callback, copying the task so we can template it temporarily"
                            )
                            saved_name = task.name
                            display.debug(
                                "done copying, going to template now")
                            try:
                                task.name = text_type(
                                    templar.template(task.name,
                                                     fail_on_undefined=False))
                                display.debug("done templating")
                            except Exception:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug(
                                    "templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback(
                                'v2_playbook_on_task_start',
                                task,
                                is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                    results += self._process_pending_results(iterator,
                                                             one_pass=True)

                # go to next host/task group
                if skip_rest:
                    continue

                display.debug(
                    "done queuing things up, now waiting for results queue to drain"
                )
                results += self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    display.debug("out of hosts to run on")
                    self._tqm.send_callback(
                        'v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        inventory=self._inventory,
                        loader=self._loader,
                        variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                include_failure = False
                if len(included_files) > 0:
                    display.debug("we have included files to process")
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    display.debug("generating all_blocks data")
                    all_blocks = dict((host, []) for host in hosts_left)
                    display.debug("done generating all_blocks data")
                    for included_file in included_files:
                        display.debug("processing included file: %s" %
                                      included_file._filename)
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(
                                included_file, iterator=iterator)

                            display.debug(
                                "iterating over new_blocks loaded from include file"
                            )
                            for new_block in new_blocks:
                                task_vars = self._variable_manager.get_vars(
                                    loader=self._loader,
                                    play=iterator._play,
                                    task=included_file._task,
                                )
                                display.debug("filtering new block on tags")
                                final_block = new_block.filter_tagged_tasks(
                                    play_context, task_vars)
                                display.debug(
                                    "done filtering new block on tags")

                                noop_block = Block(parent_block=task._block)
                                noop_block.block = [
                                    noop_task for t in new_block.block
                                ]
                                noop_block.always = [
                                    noop_task for t in new_block.always
                                ]
                                noop_block.rescue = [
                                    noop_task for t in new_block.rescue
                                ]

                                for host in hosts_left:
                                    if host in included_file._hosts:
                                        all_blocks[host].append(final_block)
                                    else:
                                        all_blocks[host].append(noop_block)
                            display.debug(
                                "done iterating over new_blocks loaded from include file"
                            )

                        except AnsibleError as e:
                            for host in included_file._hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                            display.error(to_unicode(e), wrap_text=False)
                            include_failure = True
                            continue

                    # finally go through all of the hosts and append the
                    # accumulated blocks to their list of tasks
                    display.debug(
                        "extending task lists for all hosts with included blocks"
                    )

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                    display.debug("done extending task lists")
                    display.debug("done processing included files")

                display.debug("results queue empty")

                display.debug("checking for any_errors_fatal")
                failed_hosts = []
                for res in results:
                    if res.is_failed() or res.is_unreachable():
                        failed_hosts.append(res._host.name)

                # if any_errors_fatal and we had an error, mark all hosts as failed
                if any_errors_fatal and len(failed_hosts) > 0:
                    for host in hosts_left:
                        # don't double-mark hosts, or the iterator will potentially
                        # fail them out of the rescue/always states
                        if host.name not in failed_hosts:
                            self._tqm._failed_hosts[host.name] = True
                            iterator.mark_host_failed(host)
                display.debug("done checking for any_errors_fatal")

            except (IOError, EOFError) as e:
                display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return False

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
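
The any_errors_fatal handling near the end of the loop is a simple propagation rule: once any host fails under a fatal task, every remaining host is marked failed, but never twice, since double-marking could push the iterator out of its rescue/always states. A minimal sketch with illustrative names, not Ansible's API:

# Illustrative names only, not Ansible's API: propagate a fatal failure
# to every remaining host exactly once.
def propagate_fatal(hosts_left, failed_hosts, mark_failed):
    for host in hosts_left:
        # don't double-mark hosts, which could otherwise push the
        # iterator out of its rescue/always states
        if host not in failed_hosts:
            mark_failed(host)

failed = {'web1'}
propagate_fatal(['web1', 'web2', 'db1'], failed, failed.add)
print(sorted(failed))  # ['db1', 'web1', 'web2']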
Example #14
    def __init__(self,
                 inventory,
                 play,
                 play_context,
                 variable_manager,
                 all_vars,
                 start_at_done=False):
        self._play = play
        self._blocks = []

        # Default options to gather
        gather_subset = C.DEFAULT_GATHER_SUBSET
        gather_timeout = C.DEFAULT_GATHER_TIMEOUT

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset
        # Retrieve timeout for gather
        if self._play.gather_timeout is not None:
            gather_timeout = self._play.gather_timeout

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.tags = ['always']
        setup_task.args = {
            'gather_subset': gather_subset,
        }
        if gather_timeout:
            setup_task.args['gather_timeout'] = gather_timeout
        setup_task.set_loader(self._play._loader)
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self._blocks.append(new_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if the host's name is in the variable manager's fact cache, then set
            # its _gathered_facts flag to true for smart gathering tests later
            if host.name in variable_manager._fact_cache:
                host._gathered_facts = True
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[
                        host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None
Example #15
    def run(self, iterator, connection_info):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                debug("getting the remaining hosts for this loop")
                self._tqm._failed_hosts = iterator.get_failed_hosts()
                hosts_left = self.get_hosts_remaining(iterator._play)
                debug("done getting the remaining hosts for this loop")
                if len(hosts_left) == 0:
                    debug("out of hosts to run on")
                    self._tqm.send_callback(
                        'v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action,
                                                   class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP',
                                                    False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run():
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            debug("'%s' skipped because role has already run" %
                                  task)
                            continue

                    if task.action == 'meta':
                        # meta tasks store their args in the _raw_params field of args,
                        # since they do not use k=v pairs, so get that
                        meta_action = task.args.get('_raw_params')
                        if meta_action == 'noop':
                            # FIXME: issue a callback for the noop here?
                            continue
                        elif meta_action == 'flush_handlers':
                            self.run_handlers(iterator, connection_info)
                        else:
                            raise AnsibleError(
                                "invalid meta action requested: %s" %
                                meta_action,
                                obj=task._ds)
                    else:
                        debug("getting variables")
                        task_vars = self._variable_manager.get_vars(
                            loader=self._loader,
                            play=iterator._play,
                            host=host,
                            task=task)
                        task_vars = self.add_tqm_variables(task_vars,
                                                           play=iterator._play)
                        templar = Templar(loader=self._loader,
                                          variables=task_vars)
                        debug("done getting variables")

                        if not callback_sent:
                            temp_task = task.copy()
                            temp_task.name = templar.template(
                                temp_task.get_name(), fail_on_undefined=False)
                            self._tqm.send_callback(
                                'v2_playbook_on_task_start',
                                temp_task,
                                is_conditional=False)
                            callback_sent = True

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars,
                                         connection_info)

                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                debug(
                    "done queuing things up, now waiting for results queue to drain"
                )
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        loader=self._loader,
                        variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(
                                included_file, iterator=iterator)
                        except AnsibleError as e:
                            for host in included_file._hosts:
                                iterator.mark_host_failed(host)
                            # FIXME: callback here?
                            print(e)
                            continue

                        for new_block in new_blocks:
                            noop_block = Block(parent_block=task._block)
                            noop_block.block = [
                                noop_task for t in new_block.block
                            ]
                            noop_block.always = [
                                noop_task for t in new_block.always
                            ]
                            noop_block.rescue = [
                                noop_task for t in new_block.rescue
                            ]
                            for host in hosts_left:
                                if host in included_file._hosts:
                                    task_vars = self._variable_manager.get_vars(
                                        loader=self._loader,
                                        play=iterator._play,
                                        host=host,
                                        task=included_file._task)
                                    final_block = new_block.filter_tagged_tasks(
                                        connection_info, task_vars)
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                debug("results queue empty")
Example #16
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []
        self._variable_manager = variable_manager

        setup_block = Block(play=self._play)
        # Gathering facts with run_once would copy the facts from one host to
        # the others.
        setup_block.run_once = False
        setup_task = Task(block=setup_block)
        setup_task.action = 'gather_facts'
        # TODO: hardcoded resolution here, but should use actual resolution code in the end,
        #       in case of 'legacy' mismatch
        setup_task.resolved_action = 'ansible.builtin.gather_facts'
        setup_task.name = 'Gathering Facts'
        setup_task.args = {}

        # Unless play is specifically tagged, gathering should 'always' run
        if not self._play.tags:
            setup_task.tags = ['always']

        # Default options to gather
        for option in ('gather_subset', 'gather_timeout', 'fact_path'):
            value = getattr(self._play, option, None)
            if value is not None:
                setup_task.args[option] = value

        setup_task.set_loader(self._play._loader)
        # short circuit fact gathering if the entire playbook is conditional
        if self._play._included_conditional is not None:
            setup_task.when = self._play._included_conditional[:]
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(all_vars)
        self._blocks.append(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(all_vars)
            if new_block.has_tasks():
                self._blocks.append(new_block)

        self._host_states = {}
        start_at_matched = False
        batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
        self.batch_size = len(batch)
        for host in batch:
            self.set_state_for_host(host.name, HostState(blocks=self._blocks))
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == IteratingStates.COMPLETE:
                        break
                    if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    self.set_state_for_host(host.name, s)

                # finally, reset the host's state to IteratingStates.SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = IteratingStates.SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None

        self.end_play = False
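
The option-forwarding loop above copies only the gather options the play actually sets; unset options fall through to the setup task's own defaults. A tiny sketch of the pattern, where PlayStub is an illustrative stand-in for a Play object:

# PlayStub is illustrative; only the "copy options the play actually
# sets" pattern is shown.
class PlayStub:
    gather_subset = ['all']
    gather_timeout = None
    fact_path = None

args = {}
for option in ('gather_subset', 'gather_timeout', 'fact_path'):
    value = getattr(PlayStub, option, None)
    if value is not None:
        args[option] = value
print(args)  # {'gather_subset': ['all']}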
Example #17
    def next(self, peek=False):
        '''
        Determines and returns the next available task from the playbook,
        advancing through the list of plays as it goes. If peek is set to True,
        the internal state is not stored.
        '''

        task = None

        # save this locally so that we can peek at the next task
        # without updating the internal state of the iterator
        run_state       = self._run_state
        failed_state    = self._failed_state
        cur_block       = self._cur_block
        cur_role        = self._cur_role
        cur_task_pos    = self._cur_task_pos
        cur_rescue_pos  = self._cur_rescue_pos
        cur_always_pos  = self._cur_always_pos
        cur_handler_pos = self._cur_handler_pos


        while True:
            if run_state == ITERATING_SETUP:
                if failed_state == FAILED_SETUP:
                    run_state = ITERATING_COMPLETE
                else:
                    run_state = ITERATING_TASKS

                    if (self._gather_facts == 'smart' and not self._host.gathered_facts) or boolean(self._gather_facts):
                        self._host.set_gathered_facts(True)
                        task = Task()
                        # FIXME: this is not the best way to get this...
                        task.set_loader(self._parent_iterator._play._loader)
                        task.action = 'setup'
                        break
            elif run_state == ITERATING_TASKS:
                # if there is any failure state besides FAILED_NONE, we should
                # change to some other running state
                if failed_state != FAILED_NONE or cur_task_pos > len(self._task_list) - 1:
                    # if there is a block (and there always should be), start running
                    # the rescue portion if it exists (and if we haven't failed that
                    # already), or the always portion (if it exists and we didn't fail
                    # there too). Otherwise, we're done iterating.
                    if cur_block:
                        if failed_state != FAILED_RESCUE and cur_block.rescue:
                            run_state = ITERATING_RESCUE
                            cur_rescue_pos = 0
                        elif failed_state != FAILED_ALWAYS and cur_block.always:
                            run_state = ITERATING_ALWAYS
                            cur_always_pos = 0
                        else:
                            run_state = ITERATING_COMPLETE
                    else:
                        run_state = ITERATING_COMPLETE
                else:
                    task = self._task_list[cur_task_pos]
                    if cur_block is not None and cur_block != task._block:
                        run_state = ITERATING_ALWAYS
                        continue
                    else:
                        cur_block = task._block
                    cur_task_pos += 1

                    # Break out of the while loop now that we have our task
                    break

            elif run_state == ITERATING_RESCUE:
                # If we're iterating through the rescue tasks, make sure we haven't
                # failed yet. If so, move on to the always block or if not get the
                # next rescue task (if one exists)
                if failed_state == FAILED_RESCUE or cur_block.rescue is None or cur_rescue_pos > len(cur_block.rescue) - 1:
                    run_state = ITERATING_ALWAYS
                else:
                    task = cur_block.rescue[cur_rescue_pos]
                    cur_rescue_pos += 1
                    break

            elif run_state == ITERATING_ALWAYS:
                # If we're iterating through the always tasks, make sure we haven't
                # failed yet. If so, we're done iterating otherwise get the next always
                # task (if one exists)
                if failed_state == FAILED_ALWAYS or cur_block.always is None or cur_always_pos > len(cur_block.always) - 1:
                    cur_block = None
                    if failed_state == FAILED_ALWAYS or cur_task_pos > len(self._task_list) - 1:
                        run_state = ITERATING_COMPLETE
                    else:
                        run_state = ITERATING_TASKS
                else:
                    task = cur_block.always[cur_always_pos]
                    cur_always_pos += 1
                    break

            elif run_state == ITERATING_COMPLETE:
                # done iterating, return None to signify that
                return None

        if task._role:
            # if we had a current role, mark that role as completed
            if cur_role and task._role != cur_role and not peek:
                cur_role._completed = True

            cur_role = task._role

            # if the current role has not had its task run flag set, mark
            # clear the completed flag so we can correctly determine if the
            # role was run
            if not cur_role._had_task_run and not peek:
                cur_role._completed = False

        # If we're not just peeking at the next task, save the internal state 
        if not peek:
            self._run_state       = run_state
            self._failed_state    = failed_state
            self._cur_block       = cur_block
            self._cur_role        = cur_role
            self._cur_task_pos    = cur_task_pos
            self._cur_rescue_pos  = cur_rescue_pos
            self._cur_always_pos  = cur_always_pos
            self._cur_handler_pos = cur_handler_pos

        return task
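
The peek handling in next() follows one pattern throughout: copy the iterator state into locals, compute the next task against the copies, and write the copies back only when peek is False. A minimal cursor showing the same idea over a plain list:

# A minimal peekable cursor, mirroring next() above: work on a local
# copy of the state and persist it only when not peeking.
class PeekCursor:
    def __init__(self, items):
        self._items = items
        self._pos = 0

    def next(self, peek=False):
        pos = self._pos              # local copy of the internal state
        if pos >= len(self._items):
            return None
        item = self._items[pos]
        pos += 1
        if not peek:                 # save the state only when not peeking
            self._pos = pos
        return item

c = PeekCursor(['a', 'b'])
print(c.next(peek=True), c.next(), c.next())  # a a b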
Example #18
File: linear.py Project: imace/ansible
    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host,
                                                                    peek=True)

        num_setups = 0
        num_tasks = 0
        num_rescue = 0
        num_always = 0

        lowest_cur_block = len(iterator._blocks)

        for (k, v) in host_tasks.iteritems():
            if v is None:
                continue

            (s, t) = v
            if t is None:
                continue

            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            for host in hosts:
                host_state_task = host_tasks[host.name]
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if s.run_state == cur_state and s.cur_block == cur_block:
                    new_t = iterator.get_next_task_for_host(host)
                    #if new_t != t:
                    #    raise AnsibleError("iterator error, wtf?")
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            return _advance_selected_hosts(hosts, lowest_cur_block,
                                           PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            return _advance_selected_hosts(hosts, lowest_cur_block,
                                           PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            return _advance_selected_hosts(hosts, lowest_cur_block,
                                           PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            return _advance_selected_hosts(hosts, lowest_cur_block,
                                           PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        return [(host, None) for host in hosts]
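
lowest_cur_block above implements the other half of lock step: hosts whose current block is ahead of the lowest unfinished block are ignored (they receive noops) until the stragglers catch up. A standalone sketch of that selection:

# Standalone sketch: 'complete' hosts are excluded, and the lowest block
# number among the rest is the current lock-step block.
COMPLETE = 'complete'

def lowest_in_progress_block(host_states, total_blocks):
    lowest = total_blocks
    for (state, block) in host_states.values():
        if state != COMPLETE and block < lowest:
            lowest = block
    return lowest

# h2 is still in block 0, so block 0 is the current lock-step block
print(lowest_in_progress_block({'h1': ('tasks', 1), 'h2': ('tasks', 0)}, 3))  # 0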
Example #19
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []
        self._variable_manager = variable_manager

        # Default options to gather
        gather_subset = play_context.gather_subset
        gather_timeout = play_context.gather_timeout
        fact_path = play_context.fact_path

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset
        # Retrieve timeout for gather
        if self._play.gather_timeout is not None:
            gather_timeout = self._play.gather_timeout
        # Retrieve fact_path
        if self._play.fact_path is not None:
            fact_path = self._play.fact_path

        setup_block = Block(play=self._play)
        # Gathering facts with run_once would copy the facts from one host to
        # the others.
        setup_block.run_once = False
        setup_task = Task(block=setup_block)
        setup_task.action = 'gather_facts'
        setup_task.name = 'Gathering Facts'
        setup_task.tags = ['always']
        setup_task.args = {
            'gather_subset': gather_subset,
        }
        if gather_timeout:
            setup_task.args['gather_timeout'] = gather_timeout
        if fact_path:
            setup_task.args['fact_path'] = fact_path
        setup_task.set_loader(self._play._loader)
        # short circuit fact gathering if the entire playbook is conditional
        if self._play._included_conditional is not None:
            setup_task.when = self._play._included_conditional[:]
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)
        self.cache_block_tasks(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self.cache_block_tasks(new_block)
                self._blocks.append(new_block)

        for handler_block in self._play.handlers:
            self.cache_block_tasks(handler_block)

        self._host_states = {}
        start_at_matched = False
        batch = inventory.get_hosts(self._play.hosts)
        self.batch_size = len(batch)
        for host in batch:
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None
Example #20
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result = self._tqm.RUN_OK
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                display.debug("getting the remaining hosts for this loop")
                hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest   = False
                choose_step = True

                # flag set if task is set to any_errors_fatal
                any_errors_fatal = False

                results = []
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    if self._tqm._terminated:
                        break

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        action = None

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        # for the linear strategy, we run meta tasks just once and for
                        # all hosts currently being iterated over rather than one host
                        results.extend(self._execute_meta(task, play_context, iterator))
                        if task.args.get('_raw_params', None) != 'noop':
                            run_once = True
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        display.debug("done getting variables")

                        run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                        if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                            any_errors_fatal = True

                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                                display.debug("done templating")
                            except Exception:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)
                        del task_vars

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                    results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))

                # go to next host/task group
                if skip_rest:
                    continue

                display.debug("done queuing things up, now waiting for results queue to drain")
                if self._pending_results > 0:
                    results += self._wait_on_pending_results(iterator)
                host_results.extend(results)

                all_role_blocks = []
                for hr in results:
                    # handle include_role
                    if hr._task.action == 'include_role':
                        loop_var = None
                        if hr._task.loop:
                            loop_var = 'item'
                            if hr._task.loop_control:
                                loop_var = hr._task.loop_control.loop_var or 'item'
                            include_results = hr._result.get('results', [])
                        else:
                            include_results = [ hr._result ]

                        for include_result in include_results:
                            if ('skipped' in include_result and include_result['skipped']) or ('failed' in include_result and include_result['failed']):
                                continue

                            display.debug("generating all_blocks data for role")
                            new_ir = hr._task.copy()
                            new_ir.vars.update(include_result.get('include_variables', dict()))
                            if loop_var and loop_var in include_result:
                                new_ir.vars[loop_var] = include_result[loop_var]

                            all_role_blocks.extend(new_ir.get_block_list(play=iterator._play, variable_manager=self._variable_manager, loader=self._loader))

                if len(all_role_blocks) > 0:
                    for host in hosts_left:
                        iterator.add_tasks(host, all_role_blocks)

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        inventory=self._inventory,
                        loader=self._loader,
                        variable_manager=self._variable_manager
                    )
                except AnsibleError as e:
                    # this is a fatal error, so we abort here regardless of block state
                    return self._tqm.RUN_ERROR

                include_failure = False
                if len(included_files) > 0:
                    display.debug("we have included files to process")
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    display.debug("generating all_blocks data")
                    all_blocks = dict((host, []) for host in hosts_left)
                    display.debug("done generating all_blocks data")
                    for included_file in included_files:
                        display.debug("processing included file: %s" % included_file._filename)
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)

                            display.debug("iterating over new_blocks loaded from include file")
                            for new_block in new_blocks:
                                task_vars = self._variable_manager.get_vars(
                                    loader=self._loader,
                                    play=iterator._play,
                                    task=included_file._task,
                                )
                                display.debug("filtering new block on tags")
                                final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                display.debug("done filtering new block on tags")

                                noop_block = Block(parent_block=task._parent)
                                noop_block.block  = [noop_task for t in new_block.block]
                                noop_block.always = [noop_task for t in new_block.always]
                                noop_block.rescue = [noop_task for t in new_block.rescue]

                                for host in hosts_left:
                                    if host in included_file._hosts:
                                        all_blocks[host].append(final_block)
                                    else:
                                        all_blocks[host].append(noop_block)
                            display.debug("done iterating over new_blocks loaded from include file")

                        except AnsibleError as e:
                            for host in included_file._hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                            display.error(to_text(e), wrap_text=False)
                            include_failure = True
                            continue

                    # finally go through all of the hosts and append the
                    # accumulated blocks to their list of tasks
                    display.debug("extending task lists for all hosts with included blocks")

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                    display.debug("done extending task lists")
                    display.debug("done processing included files")

                display.debug("results queue empty")

                display.debug("checking for any_errors_fatal")
                failed_hosts = []
                unreachable_hosts = []
                for res in results:
                    if res.is_failed():
                        failed_hosts.append(res._host.name)
                    elif res.is_unreachable():
                        unreachable_hosts.append(res._host.name)

                # if any_errors_fatal and we had an error, mark all hosts as failed
                if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
                    for host in hosts_left:
                        (s, _) = iterator.get_next_task_for_host(host, peek=True)
                        if s.run_state != iterator.ITERATING_RESCUE or \
                           (s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0):
                            self._tqm._failed_hosts[host.name] = True
                            result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug("done checking for any_errors_fatal")

                display.debug("checking for max_fail_percentage")
                if iterator._play.max_fail_percentage is not None and len(results) > 0:
                    percentage = iterator._play.max_fail_percentage / 100.0

                    if (len(self._tqm._failed_hosts) / len(results)) > percentage:
                        for host in hosts_left:
                            # don't double-mark hosts, or the iterator will potentially
                            # fail them out of the rescue/always states
                            if host.name not in failed_hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                        self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                        result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug("done checking for max_fail_percentage")

                display.debug("checking to see if all hosts have failed and the running result is not ok")
                if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
                    display.debug("^ not ok, so returning result now")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    return result
                display.debug("done checking to see if all hosts have failed")

            except (IOError, EOFError) as e:
                display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return self._tqm.RUN_UNKNOWN_ERROR

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
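One note on the boolean expressions tightened above: Python's `and` binds more tightly than `or`, so the original unparenthesized `run_once` line already grouped as intended; the added parentheses only make that explicit. A minimal standalone check of the precedence (toy values, nothing Ansible-specific):

    # "a or b and c" parses as "a or (b and c)", not "(a or b) and c".
    a, b, c = False, True, 'BYPASS'
    assert (a or b and c) == (a or (b and c)) == 'BYPASS'

    # The two groupings happen to coincide above, but not in general:
    a, b, c = True, True, False
    assert (a or b and c) is True      # a or (b and c)
    assert ((a or b) and c) is False   # the misreading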
Example #21
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task while there is one left to run
        result = self._tqm.RUN_OK
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                display.debug("getting the remaining hosts for this loop")
                hosts_left = self.get_hosts_left(iterator)
                display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest   = False
                choose_step = True

                # flag set if task is set to any_errors_fatal
                any_errors_fatal = False

                results = []
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    if self._tqm._terminated:
                        break

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        action = None

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        # for the linear strategy, we run meta tasks just once and for
                        # all hosts currently being iterated over rather than one host
                        results.extend(self._execute_meta(task, play_context, iterator, host))
                        if task.args.get('_raw_params', None) != 'noop':
                            run_once = True
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        display.debug("done getting variables")

                        run_once = templar.template(task.run_once) or (action and getattr(action, 'BYPASS_HOST_LOOP', False))

                        if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                            any_errors_fatal = True

                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                                display.debug("done templating")
                            except Exception:
                                # just ignore any errors during task name templating;
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)
                        del task_vars

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                    results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))

                # go to next host/task group
                if skip_rest:
                    continue

                display.debug("done queuing things up, now waiting for results queue to drain")
                if self._pending_results > 0:
                    results += self._wait_on_pending_results(iterator)
                host_results.extend(results)

                all_role_blocks = []
                for hr in results:
                    # handle include_role
                    if hr._task.action == 'include_role':
                        loop_var = None
                        if hr._task.loop:
                            loop_var = 'item'
                            if hr._task.loop_control:
                                loop_var = hr._task.loop_control.loop_var or 'item'
                            include_results = hr._result.get('results', [])
                        else:
                            include_results = [ hr._result ]

                        for include_result in include_results:
                            if ('skipped' in include_result and include_result['skipped']) or ('failed' in include_result and include_result['failed']):
                                continue

                            display.debug("generating all_blocks data for role")
                            new_ir = hr._task.copy()
                            new_ir.vars.update(include_result.get('include_variables', dict()))
                            if loop_var and loop_var in include_result:
                                new_ir.vars[loop_var] = include_result[loop_var]

                            all_role_blocks.extend(new_ir.get_block_list(play=iterator._play, variable_manager=self._variable_manager, loader=self._loader))

                if len(all_role_blocks) > 0:
                    for host in hosts_left:
                        iterator.add_tasks(host, all_role_blocks)

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        inventory=self._inventory,
                        loader=self._loader,
                        variable_manager=self._variable_manager
                    )
                except AnsibleError as e:
                    # this is a fatal error, so we abort here regardless of block state
                    return self._tqm.RUN_ERROR

                include_failure = False
                if len(included_files) > 0:
                    display.debug("we have included files to process")
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    display.debug("generating all_blocks data")
                    all_blocks = dict((host, []) for host in hosts_left)
                    display.debug("done generating all_blocks data")
                    for included_file in included_files:
                        display.debug("processing included file: %s" % included_file._filename)
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)

                            display.debug("iterating over new_blocks loaded from include file")
                            for new_block in new_blocks:
                                task_vars = self._variable_manager.get_vars(
                                    loader=self._loader,
                                    play=iterator._play,
                                    task=included_file._task,
                                )
                                display.debug("filtering new block on tags")
                                final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                display.debug("done filtering new block on tags")

                                noop_block = Block(parent_block=task._parent)
                                noop_block.block  = [noop_task for t in new_block.block]
                                noop_block.always = [noop_task for t in new_block.always]
                                noop_block.rescue = [noop_task for t in new_block.rescue]

                                for host in hosts_left:
                                    if host in included_file._hosts:
                                        all_blocks[host].append(final_block)
                                    else:
                                        all_blocks[host].append(noop_block)
                            display.debug("done iterating over new_blocks loaded from include file")

                        except AnsibleError as e:
                            for host in included_file._hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                            display.error(to_text(e), wrap_text=False)
                            include_failure = True
                            continue

                    # finally go through all of the hosts and append the
                    # accumulated blocks to their list of tasks
                    display.debug("extending task lists for all hosts with included blocks")

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                    display.debug("done extending task lists")
                    display.debug("done processing included files")

                display.debug("results queue empty")

                display.debug("checking for any_errors_fatal")
                failed_hosts = []
                unreachable_hosts = []
                for res in results:
                    if res.is_failed():
                        failed_hosts.append(res._host.name)
                    elif res.is_unreachable():
                        unreachable_hosts.append(res._host.name)

                # if any_errors_fatal and we had an error, mark all hosts as failed
                if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
                    for host in hosts_left:
                        (s, _) = iterator.get_next_task_for_host(host, peek=True)
                        if s.run_state != iterator.ITERATING_RESCUE or \
                           (s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0):
                            self._tqm._failed_hosts[host.name] = True
                            result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug("done checking for any_errors_fatal")

                display.debug("checking for max_fail_percentage")
                if iterator._play.max_fail_percentage is not None and len(results) > 0:
                    percentage = iterator._play.max_fail_percentage / 100.0

                    if (len(self._tqm._failed_hosts) / len(results)) > percentage:
                        for host in hosts_left:
                            # don't double-mark hosts, or the iterator will potentially
                            # fail them out of the rescue/always states
                            if host.name not in failed_hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                        self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                        result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug("done checking for max_fail_percentage")

                display.debug("checking to see if all hosts have failed and the running result is not ok")
                if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
                    display.debug("^ not ok, so returning result now")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    return result
                display.debug("done checking to see if all hosts have failed")

            except (IOError, EOFError) as e:
                display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return self._tqm.RUN_UNKNOWN_ERROR

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
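The max_fail_percentage check near the end of run() above compares the failed-host ratio against the play's threshold; note the comparison is strictly greater-than, so hitting the threshold exactly does not abort. A standalone sketch of that arithmetic, with made-up numbers rather than the Ansible API:

    # Made-up numbers; mirrors the comparison in the run() method above.
    max_fail_percentage = 30      # play-level threshold, in percent
    failed_hosts = 4
    total_results = 10

    percentage = max_fail_percentage / 100.0
    if (failed_hosts / float(total_results)) > percentage:
        print("aborting play: %d%% failed > %d%% allowed"
              % (100 * failed_hosts // total_results, max_fail_percentage))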
Example #22
    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks  = 0
        num_rescue = 0
        num_always = 0

        lowest_cur_block = len(iterator._blocks)

        display.debug("counting tasks in each state of execution")
        for (k, v) in iteritems(host_tasks):
            if v is None:
                continue

            (s, t) = v
            if t is None:
                continue

            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution")

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks[host.name]
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]
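As a minimal illustration of the lockstep idea described in the docstring above: hosts whose pending task is in the winning state get that task, while every other host is padded with a noop so the whole inventory stays on the same step. Toy types and data below, not the Ansible API:

    NOOP = 'noop'

    def lockstep(host_states, winning_state):
        # pad hosts outside the winning state with a noop placeholder
        rvals = []
        for host, (state, task) in host_states:
            rvals.append((host, task if state == winning_state else NOOP))
        return rvals

    host_states = [('web1', ('tasks',  'install nginx')),
                   ('web2', ('rescue', 'report failure')),
                   ('db1',  ('tasks',  'install nginx'))]
    # 'tasks' outranks 'rescue', so web2 is padded with a noop this round:
    assert lockstep(host_states, 'tasks') == [
        ('web1', 'install nginx'), ('web2', NOOP), ('db1', 'install nginx')]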
Example #23
    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks  = 0
        num_rescue = 0
        num_always = 0

        display.debug("counting tasks in each state of execution")
        host_tasks_to_run = [(host, state_task)
                             for host, state_task in iteritems(host_tasks)
                             if state_task and state_task[1]]

        if host_tasks_to_run:
            try:
                lowest_cur_block = min(
                    s.cur_block for h, (s, t) in host_tasks_to_run
                    if s.run_state != PlayIterator.ITERATING_COMPLETE)
            except ValueError:
                lowest_cur_block = None
        else:
            # empty host_tasks_to_run will just run till the end of the function
            # without ever touching lowest_cur_block
            lowest_cur_block = None

        for (k, v) in host_tasks_to_run:
            (s, t) = v

            if lowest_cur_block is None or s.cur_block > lowest_cur_block:
                # lowest_cur_block is None when every remaining host is
                # ITERATING_COMPLETE; otherwise this is not the current
                # block. Ignore it either way (the explicit None check also
                # avoids an int/None comparison error on Python 3).
                continue

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution:\n\tnum_setups: %s\n\tnum_tasks: %s\n\tnum_rescue: %s\n\tnum_always: %s" % (num_setups,
                                                                                                                                                  num_tasks,
                                                                                                                                                  num_rescue,
                                                                                                                                                  num_always))

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks.get(host.name)
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]
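This variant computes lowest_cur_block with min() over a filtered generator and uses the ValueError raised for an empty sequence as its "everyone is complete" signal. The same pattern in isolation, with made-up (cur_block, run_state) pairs:

    COMPLETE = 'complete'
    states = [(2, 'tasks'), (1, 'rescue'), (3, COMPLETE)]

    try:
        lowest_cur_block = min(cur_block for cur_block, run_state in states
                               if run_state != COMPLETE)
    except ValueError:
        # every entry was COMPLETE, nothing left to schedule
        lowest_cur_block = None

    assert lowest_cur_block == 1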
Example #24
File: linear.py  Project: dataxu/ansible
    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)

        num_setups = 0
        num_tasks  = 0
        num_rescue = 0
        num_always = 0

        lowest_cur_block = len(iterator._blocks)

        for (k, v) in iteritems(host_tasks):  # dict.iteritems() is Python 2 only
            (s, t) = v
            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            for host in hosts:
                (s, t) = host_tasks[host.name]
                if s.run_state == cur_state and s.cur_block == cur_block:
                    new_t = iterator.get_next_task_for_host(host)
                    #if new_t != t:
                    #    raise AnsibleError("iterator error, wtf?")
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        return [(host, None) for host in hosts]
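On the dict-iteration fix above: dict.iteritems() exists only on Python 2, while the iteritems() helper the neighboring examples call (imported from six, which Ansible bundles) works on both. A sketch, assuming six is importable:

    from six import iteritems  # plain .items() also works on both versions

    host_tasks = {'web1': ('state1', 'task1'), 'db1': ('state2', 'task2')}
    for name, (state, task) in iteritems(host_tasks):
        print(name, state, task)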
Example #25
    def get_next_task_for_host(self, host, peek=False):

        display.debug("getting the next task for host %s" % host.name)
        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            display.debug("host %s is done iterating, returning" % host.name)
            return (None, None)
        elif s.run_state == self.ITERATING_SETUP:
            s.run_state = self.ITERATING_TASKS
            s.pending_setup = True

            # Gather facts if the default is 'smart' and we have not yet
            # done it for this host; or if 'explicit' and the play sets
            # gather_facts to True; or if 'implicit' and the play does
            # NOT explicitly set gather_facts to False.

            gathering = C.DEFAULT_GATHERING
            implied = self._play.gather_facts is None or boolean(
                self._play.gather_facts)

            if (gathering == 'implicit' and implied) or \
               (gathering == 'explicit' and boolean(self._play.gather_facts)) or \
               (gathering == 'smart' and implied and not host._gathered_facts):
                if not peek:
                    # mark the host as having gathered facts
                    host.set_gathered_facts(True)

                task = Task()
                task.action = 'setup'
                task.args = {}
                task.set_loader(self._play._loader)
            else:
                s.pending_setup = False

        if not task:
            old_s = s
            (s, task) = self._get_next_task_from_state(s, peek=peek)

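        # NOTE: old_s is only bound in the branch above, but the helper below is
        # only invoked for tasks that carry a role; the setup task created
        # earlier has none, so in practice the closure is evaluated only after
        # old_s exists.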
        def _roles_are_different(ra, rb):
            if ra != rb:
                return True
            else:
                return old_s.cur_dep_chain != task._block._dep_chain

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and _roles_are_different(
                    task._role, s.cur_role
            ) and host.name in s.cur_role._had_task_run and not peek:
                s.cur_role._completed[host.name] = True
            s.cur_role = task._role
            s.cur_dep_chain = task._block._dep_chain

        if not peek:
            self._host_states[host.name] = s

        display.debug("done getting next task for host %s" % host.name)
        display.debug(" ^ task is: %s" % task)
        display.debug(" ^ state is: %s" % s)
        return (s, task)
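The comment block in this example spells out the three fact-gathering policies. A standalone predicate expressing the same truth table (hypothetical helper, not part of the Ansible API; plain bool() stands in for Ansible's boolean() string coercion):

    def should_gather_facts(gathering, gather_facts, already_gathered):
        # gathering: 'implicit', 'explicit', or 'smart' (C.DEFAULT_GATHERING);
        # gather_facts: the play's setting (None/True/False);
        # already_gathered: equivalent of host._gathered_facts.
        implied = gather_facts is None or bool(gather_facts)
        return ((gathering == 'implicit' and implied) or
                (gathering == 'explicit' and bool(gather_facts)) or
                (gathering == 'smart' and implied and not already_gathered))

    assert should_gather_facts('implicit', None, False)       # default: gather
    assert not should_gather_facts('implicit', False, False)  # play opted out
    assert not should_gather_facts('smart', None, True)       # facts cached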