Example #1
    def get_next_task_for_host(self, host, peek=False):

        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            return None
        elif s.run_state == self.ITERATING_SETUP:
            s.run_state = self.ITERATING_TASKS
            s.pending_setup = True
            if (self._play.gather_facts == 'smart' and not host._gathered_facts) or boolean(self._play.gather_facts):
                if not peek:
                    # mark the host as having gathered facts
                    host.set_gathered_facts(True)

                task = Task()
                task.action = 'setup'
                task.args   = {}
                task.set_loader(self._play._loader)
            else:
                s.pending_setup = False

        if not task:
            (s, task) = self._get_next_task_from_state(s, peek=peek)

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
                s.cur_role._completed = True
            s.cur_role = task._role

        if not peek:
            self._host_states[host.name] = s

        return (s, task)
Example #2
    def deserialize(self, data):
        '''
        Override of the default deserialize method, to match the above overridden
        serialize method
        '''

        from ansible.playbook.task import Task

        # we don't want the full set of attributes (the task lists), as that
        # would lead to a serialize/deserialize loop
        for attr in self._get_base_attributes():
            if attr in data and attr not in ('block', 'rescue', 'always'):
                setattr(self, attr, data.get(attr))

        self._dep_chain = data.get('dep_chain', [])

        # if there was a serialized role, unpack it too
        role_data = data.get('role')
        if role_data:
            r = Role()
            r.deserialize(role_data)
            self._role = r

        # if there was a serialized task include, unpack it too
        ti_data = data.get('task_include')
        if ti_data:
            ti = Task()
            ti.deserialize(ti_data)
            self._task_include = ti

        pb_data = data.get('parent_block')
        if pb_data:
            pb = Block()
            pb.deserialize(pb_data)
            self._parent_block = pb
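
The docstring above refers to a matching serialize override that the snippet does not show. A minimal sketch of that counterpart, mirroring the attributes deserialize() unpacks (illustrative, assuming the same helpers exist):

    def serialize(self):
        '''
        Sketch of the matching override: dump the base attributes except
        the task lists, plus the parent references deserialize() unpacks.
        '''
        data = dict()
        for attr in self._get_base_attributes():
            if attr not in ('block', 'rescue', 'always'):
                data[attr] = getattr(self, attr)

        data['dep_chain'] = self._dep_chain

        if self._role is not None:
            data['role'] = self._role.serialize()
        if self._task_include is not None:
            data['task_include'] = self._task_include.serialize()
        if self._parent_block is not None:
            data['parent_block'] = self._parent_block.serialize()

        return data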
Example #3
    def deserialize(self, data):
        '''
        Override of the default deserialize method, to match the above overridden
        serialize method
        '''

        from ansible.playbook.task import Task

        # unpack the when attribute, which is the only one we want
        self.when = data.get('when')
        self._dep_chain = data.get('dep_chain', [])

        # if there was a serialized role, unpack it too
        role_data = data.get('role')
        if role_data:
            r = Role()
            r.deserialize(role_data)
            self._role = r

        # if there was a serialized task include, unpack it too
        ti_data = data.get('task_include')
        if ti_data:
            ti = Task()
            ti.deserialize(ti_data)
            self._task_include = ti
Example #4
def test_process_include_simulate_free(mock_iterator, mock_variable_manager):
    hostname = "testhost1"
    hostname2 = "testhost2"

    parent_task_ds = {'debug': 'msg=foo'}
    parent_task1 = Task.load(parent_task_ds)
    parent_task2 = Task.load(parent_task_ds)

    task_ds = {'include': 'include_test.yml'}
    loaded_task1 = TaskInclude.load(task_ds, task_include=parent_task1)
    loaded_task2 = TaskInclude.load(task_ds, task_include=parent_task2)

    return_data = {'include': 'include_test.yml'}
    # The task in the TaskResult has to be a TaskInclude so it has a .static attr
    result1 = task_result.TaskResult(host=hostname, task=loaded_task1, return_data=return_data)
    result2 = task_result.TaskResult(host=hostname2, task=loaded_task2, return_data=return_data)
    results = [result1, result2]

    fake_loader = DictDataLoader({'include_test.yml': ""})

    res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager)
    assert isinstance(res, list)
    assert len(res) == 2
    assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml')
    assert res[1]._filename == os.path.join(os.getcwd(), 'include_test.yml')

    assert res[0]._hosts == ['testhost1']
    assert res[1]._hosts == ['testhost2']

    assert res[0]._args == {}
    assert res[1]._args == {}
Example #5
    def test_process_include_results(self):
        hostname = "testhost1"
        hostname2 = "testhost2"

        parent_task_ds = {'debug': 'msg=foo'}
        parent_task = Task.load(parent_task_ds)

        task_ds = {'include': 'include_test.yml'}
        task_include = TaskInclude()
        loaded_task = task_include.load(task_ds, task_include=parent_task)

        child_task_ds = {'include': 'other_include_test.yml'}
        child_task_include = TaskInclude()
        loaded_child_task = child_task_include.load(child_task_ds, task_include=loaded_task)

        return_data = {'include': 'include_test.yml'}
        # The task in the TaskResult has to be a TaskInclude so it has a .static attr
        result1 = task_result.TaskResult(host=hostname, task=loaded_task, return_data=return_data)

        return_data = {'include': 'other_include_test.yml'}
        result2 = task_result.TaskResult(host=hostname2, task=loaded_child_task, return_data=return_data)
        results = [result1, result2]

        fake_loader = DictDataLoader({'include_test.yml': "",
                                      'other_include_test.yml': ""})

        mock_tqm = MagicMock(name='MockTaskQueueManager')

        mock_play = MagicMock(name='MockPlay')

        mock_iterator = MagicMock(name='MockIterator')
        mock_iterator._play = mock_play

        mock_inventory = MagicMock(name='MockInventory')
        mock_inventory._hosts_cache = dict()

        def _get_host(host_name):
            return None

        mock_inventory.get_host.side_effect = _get_host

        # TODO: can we use a real VariableManager?
        mock_variable_manager = MagicMock(name='MockVariableManager')
        mock_variable_manager.get_vars.return_value = dict()

        res = IncludedFile.process_include_results(results, mock_tqm, mock_iterator,
                                                   mock_inventory, fake_loader,
                                                   mock_variable_manager)
        self.assertIsInstance(res, list)
        self.assertEqual(res[0]._filename, os.path.join(os.getcwd(), 'include_test.yml'))
        self.assertEqual(res[1]._filename, os.path.join(os.getcwd(), 'other_include_test.yml'))

        self.assertEqual(res[0]._hosts, ['testhost1'])
        self.assertEqual(res[1]._hosts, ['testhost2'])

        self.assertEqual(res[0]._args, {})
        self.assertEqual(res[1]._args, {})
Example #6
    def get_next_task_for_host(self, host, peek=False):

        display.debug("getting the next task for host %s" % host.name)
        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            display.debug("host %s is done iterating, returning" % host.name)
            return (None, None)
        elif s.run_state == self.ITERATING_SETUP:
            s.run_state = self.ITERATING_TASKS
            s.pending_setup = True

            # Gather facts if the default is 'smart' and we have not yet
            # done it for this host; or if 'explicit' and the play sets
            # gather_facts to True; or if 'implicit' and the play does
            # NOT explicitly set gather_facts to False.

            gathering = C.DEFAULT_GATHERING
            implied = self._play.gather_facts is None or boolean(self._play.gather_facts)

            if (gathering == 'implicit' and implied) or \
               (gathering == 'explicit' and boolean(self._play.gather_facts)) or \
               (gathering == 'smart' and implied and not host._gathered_facts):
                if not peek:
                    # mark the host as having gathered facts
                    host.set_gathered_facts(True)

                task = Task()
                task.action = 'setup'
                task.args   = {}
                task.set_loader(self._play._loader)
            else:
                s.pending_setup = False

        if not task:
            (s, task) = self._get_next_task_from_state(s, peek=peek)

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and task._role != s.cur_role and host.name in s.cur_role._had_task_run and not peek:
                s.cur_role._completed[host.name] = True
            s.cur_role = task._role

        if not peek:
            self._host_states[host.name] = s

        display.debug("done getting next task for host %s" % host.name)
        display.debug(" ^ task is: %s" % task)
        display.debug(" ^ state is: %s" % s)
        return (s, task)
Example #7
 def _load_list_of_tasks(self, ds):
     assert type(ds) == list
     task_list = []
     for task in ds:
         t = Task.load(task)
         task_list.append(t)
     return task_list
Example #8
    def test_load_task_kv_form_error_36848(self, mock_get_err_lines):
        ds = objects.AnsibleMapping(kv_bad_args_ds)
        ds.ansible_pos = ('test_task_faux_playbook.yml', 1, 1)
        mock_get_err_lines.return_value = (kv_bad_args_str, '')

        with self.assertRaises(errors.AnsibleParserError) as cm:
            Task.load(ds)

        self.assertIsInstance(cm.exception, errors.AnsibleParserError)
        self.assertEqual(cm.exception.obj, ds)
        self.assertEqual(cm.exception.obj, kv_bad_args_ds)
        self.assertIn("The error appears to be in 'test_task_faux_playbook.yml", cm.exception.message)
        self.assertIn(kv_bad_args_str, cm.exception.message)
        self.assertIn('apk', cm.exception.message)
        self.assertEqual(cm.exception.message.count('The offending line'), 1)
        self.assertEqual(cm.exception.message.count('The error appears to be in'), 1)
Example #9
    def _prepare_and_create_noop_block_from(self, original_block, parent, iterator):
        self.noop_task = Task()
        self.noop_task.action = 'meta'
        self.noop_task.args['_raw_params'] = 'noop'
        self.noop_task.set_loader(iterator._play._loader)

        return self._create_noop_block_from(original_block, parent)
Example #10
def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task

    assert type(ds) == list

    task_list = []
    for task in ds:
        if not isinstance(task, dict):
            raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds)

        if 'block' in task:
            t = Block.load(
                task,
                parent_block=block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
        else:
            if use_handlers:
                t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
            else:
                t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)

        task_list.append(t)

    return task_list
Example #11
    def _poll_async_result(self, result, templar):
        '''
        Polls for the specified JID to be complete
        '''

        async_jid = result.get('ansible_job_id')
        if async_jid is None:
            return dict(failed=True, msg="No job id was returned by the async task")

        # Create a new pseudo-task to run the async_status module, and run
        # that (with a sleep for "poll" seconds between each retry) until the
        # async time limit is exceeded.

        async_task = Task().load(dict(action='async_status jid=%s' % async_jid))

        # Because this is an async task, the action handler is async. However,
        # we need the 'normal' action handler for the status check, so get it
        # now via the action_loader
        normal_handler = self._shared_loader_obj.action_loader.get(
            'normal',
            task=async_task,
            connection=self._connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=templar,
            shared_loader_obj=self._shared_loader_obj,
        )

        time_left = self._task.async
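
The snippet is cut off at the start of the polling loop. A sketch of the continuation implied by the comment above (illustrative; poll and async are the task attributes used in this era of the code):

        while time_left > 0:
            time.sleep(self._task.poll)

            # re-check the job status via the 'normal' handler obtained above
            async_result = normal_handler.run()
            if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
                break

            time_left -= self._task.poll

        if int(async_result.get('finished', 0)) != 1:
            return dict(failed=True, msg="async task did not complete within the requested time")
        else:
            return async_result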
Example #12
def test_process_include_results(mock_iterator, mock_variable_manager):
    hostname = "testhost1"
    hostname2 = "testhost2"

    parent_task_ds = {'debug': 'msg=foo'}
    parent_task = Task.load(parent_task_ds)
    parent_task._play = None

    task_ds = {'include': 'include_test.yml'}
    loaded_task = TaskInclude.load(task_ds, task_include=parent_task)

    return_data = {'include': 'include_test.yml'}
    # The task in the TaskResult has to be a TaskInclude so it has a .static attr
    result1 = task_result.TaskResult(host=hostname,
                                     task=loaded_task,
                                     return_data=return_data)
    result2 = task_result.TaskResult(host=hostname2,
                                     task=loaded_task,
                                     return_data=return_data)
    results = [result1, result2]

    fake_loader = DictDataLoader({'include_test.yml': ""})

    res = IncludedFile.process_include_results(results, mock_iterator,
                                               fake_loader,
                                               mock_variable_manager)
    assert isinstance(res, list)
    assert len(res) == 1
    assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml')
    assert res[0]._hosts == ['testhost1', 'testhost2']
    assert res[0]._args == {}
    assert res[0]._vars == {}
Example #13
    def _load_tasks(self, ds, keyname):
        ''' handle task and handler include statements '''

        tasks = ds.get(keyname, [])
        results = []
        for x in tasks:
            task_vars = self.vars.copy()
            if 'include' in x:
                tokens = shlex.split(x['include'])

                for t in tokens[1:]:
                    (k, v) = t.split("=", 1)
                    task_vars[k] = v
                include_file = tokens[0]
                data = utils.parse_yaml_from_file(
                    utils.path_dwim(self.playbook.basedir, include_file))
            elif type(x) == dict:
                data = [x]
            else:
                raise Exception("unexpected task type")
            for y in data:
                items = y.get('with_items', None)
                if items is None:
                    items = ['']
                elif isinstance(items, basestring):
                    items = utils.varLookup(items, task_vars)
                for item in items:
                    mv = task_vars.copy()
                    mv['item'] = item
                    results.append(Task(self, y, module_vars=mv))
        return results
Example #14
def load_list_of_tasks(ds, block=None, role=None, task_include=None, loader=None):
    """
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    """

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.task import Task
    from ansible.playbook.task_include import TaskInclude

    assert type(ds) == list

    task_list = []
    for task in ds:
        if not isinstance(task, dict):
            raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds)

        if "include" in task:
            cur_basedir = None
            if isinstance(task, AnsibleBaseYAMLObject) and loader:
                pos_info = task.get_position_info()
                new_basedir = os.path.dirname(pos_info[0])
                cur_basedir = loader.get_basedir()
                loader.set_basedir(new_basedir)

            t = TaskInclude.load(task, block=block, role=role, task_include=task_include, loader=loader)

            if cur_basedir and loader:
                loader.set_basedir(cur_basedir)
        else:
            t = Task.load(task, block=block, role=role, task_include=task_include, loader=loader)

        task_list.append(t)

    return task_list
Example #15
    def setUp(self, mock_socket):
        # TODO: this python version validation won't be needed as long as the _time_ns call is mocked.
        if sys.version_info < OPENTELEMETRY_MINIMUM_PYTHON_VERSION:
            self.skipTest(
                "Python %s+ is needed for OpenTelemetry" %
                ",".join(map(str, OPENTELEMETRY_MINIMUM_PYTHON_VERSION)))

        mock_socket.gethostname.return_value = 'my-host'
        mock_socket.gethostbyname.return_value = '1.2.3.4'
        self.opentelemetry = OpenTelemetrySource(display=None)
        self.task_fields = {'args': {}}
        self.mock_host = Mock('MockHost')
        self.mock_host.name = 'myhost'
        self.mock_host._uuid = 'myhost_uuid'
        self.mock_task = Task()
        self.mock_task.action = 'myaction'
        self.mock_task.no_log = False
        self.mock_task._role = 'myrole'
        self.mock_task._uuid = 'myuuid'
        self.mock_task.args = {}
        self.mock_task.get_name = MagicMock(return_value='mytask')
        self.mock_task.get_path = MagicMock(return_value='/mypath')
        self.my_task = TaskData('myuuid', 'mytask', '/mypath', 'myplay',
                                'myaction', '')
        self.my_task_result = TaskResult(host=self.mock_host,
                                         task=self.mock_task,
                                         return_data={},
                                         task_fields=self.task_fields)
Example #16
 def test_conga_role_multiple_matches_use_first_and_warn(self):
     task = Task(None, MockRole("dispatcher"), None)
     with patch(
             'action_plugins.conga_facts.display.warning') as mock_warning:
         facts = MockModule(task).get_facts()
         self.assertIn('author', facts.get('conga_variants'))
         self.assertNotIn('publish', facts.get('conga_variants'))
         mock_warning.assert_called()
Example #17
    def test_v2_runner_on_failed(self):
        """
        Verify failed results uses the logger.
        """
        result = TaskResult('127.0.0.1', Task(), {'exception': 'error'})

        self.logforward.v2_runner_on_failed(result)
        self.assertEqual(1, self.logforward.log.warn.call_count)
Example #18
 def test_conga_config_path_default(self):
     task = Task(None, MockRole('db'), None)
     task_vars = dict(TASK_VARS)
     task_vars.pop("conga_target_path")
     task_vars.pop("conga_node")
     facts = MockModule(task).get_facts(task_vars)
     self.assertEqual("basedir/target/configuration/environment/hostname",
                      facts.get('conga_config_path'))
Example #19
 def test_conga_config_path_custom(self):
     task = Task(None, MockRole('db'), None)
     facts = MockModule(task).get_facts()
     self.assertEqual("basedir/target_path/environment/node",
                      facts.get('conga_config_path'))
     self.assertEqual("basedir", facts.get('conga_basedir'))
     self.assertDictEqual({"path": "/opt/db"}, facts.get('conga_config'))
     self.assertEqual([{'tenant': 'tenant1'}], facts.get('conga_tenants'))
Example #20
    def _load_tasks(self, ds, keyname):
        ''' handle task and handler include statements '''

        tasks = ds.get(keyname, [])
        results = []
        for x in tasks:
            if 'include' in x:
                task_vars = self.vars.copy()
                tokens = shlex.split(x['include'])
                items = ['']
                for k in x:
                    if not k.startswith("with_"):
                        continue
                    plugin_name = k[5:]
                    if plugin_name not in utils.plugins.lookup_loader:
                        raise errors.AnsibleError(
                            "cannot find lookup plugin named %s for usage in with_%s"
                            % (plugin_name, plugin_name))
                    terms = utils.template_ds(self.basedir, x[k], task_vars)
                    items = utils.plugins.lookup_loader.get(
                        plugin_name, basedir=self.basedir,
                        runner=None).run(terms, inject=task_vars)

                for item in items:
                    mv = task_vars.copy()
                    mv['item'] = item
                    for t in tokens[1:]:
                        (k, v) = t.split("=", 1)
                        mv[k] = utils.template_ds(self.basedir, v, mv)
                    include_file = utils.template(self.basedir, tokens[0], mv)
                    data = utils.parse_yaml_from_file(
                        utils.path_dwim(self.basedir, include_file))
                    for y in data:
                        results.append(Task(self, y, module_vars=mv.copy()))
            elif type(x) == dict:
                task_vars = self.vars.copy()
                results.append(Task(self, x, module_vars=task_vars))
            else:
                raise Exception("unexpected task type")

        for x in results:
            if self.tags is not None:
                x.tags.extend(self.tags)

        return results
Example #21
 def _make_task_vars(
         self, task: Task, hostname: str = None, play: Play = None
 ) -> Tuple[Dict[str, Any], Task, Host]:
     variable_manager: VariableManager = task.get_variable_manager()
     # data_loader = task.get_loader()
     inventory: InventoryManager = variable_manager._inventory
     host = None if not hostname else inventory.get_host(hostname)
     task_vars = variable_manager.get_vars(play=play, task=task, host=host)
     return task_vars, task, host
Example #22
    def test_v2_runner_on_unreachable(self):
        """
        Verify UNREACHABLE results uses the logger.
        """
        result = TaskResult('127.0.0.1', Task(), {})
        result._host = Host('127.0.0.1')

        self.logforward.v2_runner_on_unreachable(result)
        self.assertEqual(1, self.logforward.log.warn.call_count)
Example #23
    def test_v2_runner_on_ok(self):
        """
        Verify OK results uses the logger.
        """
        result = TaskResult('127.0.0.1', Task(), {})
        result._host = Host('127.0.0.1')

        self.logforward.v2_runner_on_ok(result)
        self.assertEqual(1, self.logforward.log.info.call_count)
Example #24
    def test_v2_runner_on_skipped(self):
        """
        Verify SKIPPED results uses the logger.
        """
        result = TaskResult('127.0.0.1', Task(), {})
        result._host = Host('127.0.0.1')

        self.logforward.v2_runner_on_skipped(result)
        self.assertEqual(1, self.logforward.log.warn.call_count)
Example #25
    def __init__(self,
                 inventory,
                 play,
                 play_context,
                 variable_manager,
                 all_vars,
                 start_at_done=False):
        self._play = play
        self._blocks = []

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.tags = ['always']
        setup_task.args = {}
        setup_task.set_loader(self._play._loader)
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self._blocks.append(new_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if the host's name is in the variable manager's fact cache, then set
            # its _gathered_facts flag to true for smart gathering tests later
            if host.name in variable_manager._fact_cache:
                host._gathered_facts = True
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None

        # Extend the play handlers list to include the handlers defined in roles
        self._play.handlers.extend(play.compile_roles_handlers())
Example #26
    def _copy(self, tmp, dest, src=None, content=None):  # pylint: disable=too-many-arguments
        task = Task()
        task.args = {
            "dest": dest,
            "content": content,
            "src": src,
            "force": "true",
        }
        res = Copy(
            task=task,
            connection=self._connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=self._templar,
            shared_loader_obj=self._shared_loader_obj,
        ).run(tmp, self.task_vars)

        if res.get("failed"):
            raise AnsibleError(res)
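
A usage sketch for the helper above (hypothetical arguments; assumes the surrounding action plugin has already populated self.task_vars):

        # Render a literal string to a file on the target; raises on failure.
        self._copy(tmp=None, dest='/etc/motd', content='managed by ansible\n')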
Example #27
 def test_conga_role_multiple_matches_with_variant_mapping(self):
     task = Task(None, MockRole("dispatcher"), None)
     task_vars = dict(TASK_VARS)
     task_vars['conga_variant_mapping'] = "publish"
     with patch(
             'action_plugins.conga_facts.display.warning') as mock_warning:
         facts = MockModule(task).get_facts(task_vars)
         self.assertIn('publish', facts.get('conga_variants'))
         self.assertNotIn('author', facts.get('conga_variants'))
         mock_warning.assert_not_called()
Example #28
    def test_v2_runner_on_failed(self):
        """
        Verify failed results uses the logger.
        """
        result = TaskResult('127.0.0.1', Task(), {'exception': 'error'})
        result._host = MagicMock()
        result._host.get_name.return_value = '127.0.0.1'

        self.logforward.v2_runner_on_failed(result)
        self.assertEqual(1, self.logforward.log.warn.call_count)
Example #29
    def get_task_content(task: Task):
        sha512 = hashlib.sha512()
        serialized_data = task.get_ds()
        if not serialized_data:
            # ansible 2.8
            serialized_data = task.dump_attrs()
        if not serialized_data:
            logger.error(
                "unable to obtain task content from ansible: caching will not work"
            )
            return
        c = json.dumps(serialized_data, sort_keys=True)

        logger.debug("content = %s", c)
        sha512.update(c.encode("utf-8"))

        # If task is a file action, cache the src.
        #
        # Take the file stats of the src (if directory, get the stats
        # of every file within) and concatenate it with the task config
        # (assigned under serialized_data)
        #
        # The idea is that, if a file is changed, so will its modification time,
        # which will force the layer to be reloaded. Otherwise, load from cache.
        #
        # Note: serialized_data was grabbed above.
        task_config = task.dump_attrs()
        if 'args' in task_config and 'src' in task_config['args']:
            src = task_config['args']['src']
            src_path = os.path.join(task.get_search_path()[0], "files", src)

            if not os.path.exists(src_path):
                src_path = os.path.join(task.get_search_path()[0], src)

            if os.path.isdir(src_path):
                dir_hash = CallbackModule.get_dir_fingerprint(src_path)
                sha512.update(dir_hash.encode("utf-8"))
            elif os.path.isfile(src_path):
                the_file = pathlib.Path(src_path)
                date_modified = str(the_file.stat().st_mtime)
                sha512.update(date_modified.encode("utf-8"))

        return sha512.hexdigest()
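
The get_dir_fingerprint helper called above is not shown. A plausible sketch, assuming it folds each contained file's path and modification time into a single string (illustrative, not the callback's actual implementation):

    @staticmethod
    def get_dir_fingerprint(dir_path):
        # Walk the tree and combine each file's path and mtime, so any
        # touched file changes the resulting fingerprint.
        parts = []
        for root, _dirs, files in os.walk(dir_path):
            for name in sorted(files):
                path = os.path.join(root, name)
                parts.append("%s:%s" % (path, os.stat(path).st_mtime))
        return "|".join(parts)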
Example #30
    def __init__(self):
        initial_dir = os.getcwd()
        ansible_basedir = os.path.join(
            os.environ.get("PROJECT_ENVIRONMENT_FILES_PATH"), "ansible")

        # Move to project directory
        os.chdir(os.environ.get("PROJECT_ENVIRONMENT_FILES_PATH"))

        # Load list of inventories from config
        config = ConfigManager('/etc/ansible/ansible.cfg')
        sources = config.data.get_setting('DEFAULT_HOST_LIST').value

        loader = CustomLoader()
        loader.set_basedir(ansible_basedir)

        # load the inventory, set the basic playbook directory
        self._inventory = CustomInventoryManager(loader=loader,
                                                 sources=sources)
        var_manager = VariableManager(loader=loader, inventory=self._inventory)
        play = Play.load(dict(hosts=['all']),
                         loader=loader,
                         variable_manager=var_manager)

        # Move back to directory of origin
        os.chdir(initial_dir)

        control_host = None
        if 'control' in self._inventory.groups:
            control_group = self._inventory.groups['control']

            if len(control_group.get_hosts()) > 0:
                control_host = control_group.get_hosts()[0]

        # Hostvars
        hostvars = {}
        for host in self._inventory.get_hosts():
            hostvars[host.name] = host.vars

        # make sure we load all magic variables on top of the global variables
        self._vars = combine_vars(
            var_manager.get_vars(play=play, task=Task(), host=control_host), {
                'hostvars': hostvars,
                'env': os.environ
            })

        # create the template renderer
        self._templar = Templar(loader=loader, variables=self._vars)

        # setup some easy variables that we use a lot
        self._vars['control_ip'] = self.get_var(
            "hostvars[groups['control'][0]]['ansible_host']")
        self._vars['edge_ip'] = self.get_var(
            "hostvars[groups['edge'][0]]['ansible_host']")
        self._vars['monitor_ip'] = self.get_var(
            "hostvars[groups['monitor'][0]]['ansible_host']")
Example #31
def load_list_of_tasks(ds,
                       play,
                       block=None,
                       role=None,
                       task_include=None,
                       use_handlers=False,
                       variable_manager=None,
                       loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task

    assert isinstance(ds, list)

    task_list = []
    for task in ds:
        assert isinstance(task, dict)

        if 'block' in task:
            t = Block.load(
                task,
                play=play,
                parent_block=block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
        else:
            if use_handlers:
                t = Handler.load(task,
                                 block=block,
                                 role=role,
                                 task_include=task_include,
                                 variable_manager=variable_manager,
                                 loader=loader)
            else:
                t = Task.load(task,
                              block=block,
                              role=role,
                              task_include=task_include,
                              variable_manager=variable_manager,
                              loader=loader)

        task_list.append(t)

    return task_list
Example #32
    def compile(self):
        '''
        Compiles and returns the task list for this play, compiled from the
        roles (which are themselves compiled recursively) and/or the list of
        tasks specified in the play.
        '''

        # create a block containing a single flush handlers meta
        # task, so we can be sure to run handlers at certain points
        # of the playbook execution
        flush_block = Block.load(
            data={'meta': 'flush_handlers'},
            play=self,
            variable_manager=self._variable_manager,
            loader=self._loader
        )

        for task in flush_block.block:
            task.implicit = True

        block_list = []
        if self.force_handlers:
            noop_task = Task()
            noop_task.action = 'meta'
            noop_task.args['_raw_params'] = 'noop'
            noop_task.implicit = True
            noop_task.set_loader(self._loader)

            b = Block(play=self)
            b.block = self.pre_tasks or [noop_task]
            b.always = [flush_block]
            block_list.append(b)

            tasks = self._compile_roles() + self.tasks
            b = Block(play=self)
            b.block = tasks or [noop_task]
            b.always = [flush_block]
            block_list.append(b)

            b = Block(play=self)
            b.block = self.post_tasks or [noop_task]
            b.always = [flush_block]
            block_list.append(b)

            return block_list

        block_list.extend(self.pre_tasks)
        block_list.append(flush_block)
        block_list.extend(self._compile_roles())
        block_list.extend(self.tasks)
        block_list.append(flush_block)
        block_list.extend(self.post_tasks)
        block_list.append(flush_block)

        return block_list
Example #33
    def get_next_task_for_host(self, host, peek=False):

        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            return None
        elif s.run_state == self.ITERATING_SETUP:
            s.run_state = self.ITERATING_TASKS
            s.pending_setup = True
            if (self._play.gather_facts == 'smart' and not host._gathered_facts) or boolean(self._play.gather_facts):
                if not peek:
                    # mark the host as having gathered facts
                    host.set_gathered_facts(True)

                task = Task()
                task.action = 'setup'
                task.args   = {}
                task.set_loader(self._play._loader)
            else:
                s.pending_setup = False

        if not task:
            (s, task) = self._get_next_task_from_state(s, peek=peek)

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
                s.cur_role._completed = True
            s.cur_role = task._role

        if not peek:
            self._host_states[host.name] = s

        return (s, task)
Example #34
    def compile(self, play, dep_chain=None):
        '''
        Returns the task list for this role, which is created by first
        recursively compiling the tasks for all direct dependencies, and
        then adding on the tasks for this role.

        The role compile() also remembers and saves the dependency chain
        with each task, so tasks know by which route they were found, and
        can correctly take their parent's tags/conditionals into account.
        '''
        from ansible.playbook.block import Block
        from ansible.playbook.task import Task

        block_list = []

        # update the dependency chain here
        if dep_chain is None:
            dep_chain = []
        new_dep_chain = dep_chain + [self]

        deps = self.get_direct_dependencies()
        for dep in deps:
            dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
            block_list.extend(dep_blocks)

        for task_block in self._task_blocks:
            new_task_block = task_block.copy()
            new_task_block._dep_chain = new_dep_chain
            new_task_block._play = play
            block_list.append(new_task_block)

        eor_block = Block(play=play)
        eor_block._loader = self._loader
        eor_block._role = self
        eor_block._variable_manager = self._variable_manager
        eor_block.run_once = False

        eor_task = Task(block=eor_block)
        eor_task._role = self
        eor_task.action = 'meta'
        eor_task.args = {'_raw_params': 'role_complete'}
        eor_task.implicit = True
        eor_task.tags = ['always']
        eor_task.when = True

        eor_block.block = [eor_task]
        block_list.append(eor_block)

        return block_list
Example #35
    def _load_tasks(self, tasks, vars={}, default_vars={}, sudo_vars={}, additional_conditions=[], original_file=None):
        ''' handle task and handler include statements '''

        results = []
        if tasks is None:
            # support empty handler files, and the like.
            tasks = []

        for x in tasks:
            if not isinstance(x, dict):
                raise errors.AnsibleError("expecting dict; got: %s" % x)

            # evaluate sudo vars for current and child tasks 
            included_sudo_vars = {}
            for k in ["sudo", "sudo_user"]:
                if k in x:
                    included_sudo_vars[k] = x[k]
                elif k in sudo_vars:
                    included_sudo_vars[k] = sudo_vars[k]
                    x[k] = sudo_vars[k]

            if 'meta' in x:
                if x['meta'] == 'flush_handlers':
                    results.append(Task(self,x))
                    continue

            task_vars = self.vars.copy()
            task_vars.update(vars)
            if original_file:
                task_vars['_original_file'] = original_file

            if 'include' in x:
                tokens = shlex.split(str(x['include']))
                items = ['']
                included_additional_conditions = list(additional_conditions)
                include_vars = {}
                for k in x:
                    if k.startswith("with_"):
                        plugin_name = k[5:]
                        if plugin_name not in utils.plugins.lookup_loader:
                            raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
                        terms = template(self.basedir, x[k], task_vars)
                        items = utils.plugins.lookup_loader.get(plugin_name, basedir=self.basedir, runner=None).run(terms, inject=task_vars)
                    elif k.startswith("when_"):
                        included_additional_conditions.insert(0, utils.compile_when_to_only_if("%s %s" % (k[5:], x[k])))
                    elif k == 'when':
                        included_additional_conditions.insert(0, utils.compile_when_to_only_if("jinja2_compare %s" % x[k]))
                    elif k in ("include", "vars", "default_vars", "only_if", "sudo", "sudo_user"):
                        continue
                    else:
                        include_vars[k] = x[k]
Example #36
 def _create_noop_task(self):
     """Create noop task"""
     self._debug('_create_noop_task...')
     noop_task = Task()
     noop_task.action = 'meta'
     noop_task.args['_raw_params'] = 'noop'
     noop_task.set_loader(self._iterator._play._loader)
     return noop_task
Example #37
def load_list_of_tasks(ds,
                       block=None,
                       role=None,
                       task_include=None,
                       loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.task import Task
    from ansible.playbook.task_include import TaskInclude

    assert type(ds) == list

    task_list = []
    for task in ds:
        if not isinstance(task, dict):
            raise AnsibleParserError(
                "task/handler entries must be dictionaries (got a %s)" %
                type(task),
                obj=ds)

        if 'include' in task:
            cur_basedir = None
            if isinstance(task, AnsibleBaseYAMLObject) and loader:
                pos_info = task.get_position_info()
                new_basedir = os.path.dirname(pos_info[0])
                cur_basedir = loader.get_basedir()
                loader.set_basedir(new_basedir)

            t = TaskInclude.load(task,
                                 block=block,
                                 role=role,
                                 task_include=task_include,
                                 loader=loader)

            if cur_basedir and loader:
                loader.set_basedir(cur_basedir)
        else:
            t = Task.load(task,
                          block=block,
                          role=role,
                          task_include=task_include,
                          loader=loader)

        task_list.append(t)

    return task_list
Example #38
    def _load_tasks(self, ds, keyname):
        ''' handle task and handler include statements '''

        tasks = ds.get(keyname, [])
        results = []
        for x in tasks:
            if 'include' in x:
                task_vars = self.vars.copy()
                tokens = shlex.split(x['include'])
                if 'with_items' in x:
                    items = utils.varReplaceWithItems(self.basedir,
                                                      x['with_items'],
                                                      task_vars)
                else:
                    items = ['']
                for item in items:
                    mv = task_vars.copy()
                    mv['item'] = item
                    for t in tokens[1:]:
                        (k, v) = t.split("=", 1)
                        mv[k] = utils.varReplaceWithItems(self.basedir, v, mv)
                    include_file = utils.template(self.basedir, tokens[0], mv)
                    data = utils.parse_yaml_from_file(
                        utils.path_dwim(self.basedir, include_file))
                    for y in data:
                        results.append(Task(self, y, module_vars=mv.copy()))
            elif type(x) == dict:
                task_vars = self.vars.copy()
                results.append(Task(self, x, module_vars=task_vars))
            else:
                raise Exception("unexpected task type")

        for x in results:
            if self.tags is not None:
                x.tags.extend(self.tags)

        return results
Example #39
    def deserialize(self, data):
        '''
        Override of the default deserialize method, to match the above overridden
        serialize method
        '''

        from ansible.playbook.task import Task

        # we don't want the full set of attributes (the task lists), as that
        # would lead to a serialize/deserialize loop
        for attr in self._get_base_attributes():
            if attr in data and attr not in ('block', 'rescue', 'always'):
                setattr(self, attr, data.get(attr))

        self._dep_chain = data.get('dep_chain', None)

        # if there was a serialized role, unpack it too
        role_data = data.get('role')
        if role_data:
            r = Role()
            r.deserialize(role_data)
            self._role = r

        # if there was a serialized task include, unpack it too
        ti_data = data.get('task_include')
        if ti_data:
            ti = Task()
            ti.deserialize(ti_data)
            self._task_include = ti

        pb_data = data.get('parent_block')
        if pb_data:
            pb = Block()
            pb.deserialize(pb_data)
            self._parent_block = pb
            self._dep_chain = self._parent_block.get_dep_chain()
Example #40
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.tags   = ['always']
        setup_task.args   = {}
        setup_task.set_loader(self._play._loader)
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self._blocks.append(new_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if the host's name is in the variable manager's fact cache, then set
            # its _gathered_facts flag to true for smart gathering tests later
            if host.name in variable_manager._fact_cache:
                host._gathered_facts = True
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None

        # Extend the play handlers list to include the handlers defined in roles
        self._play.handlers.extend(play.compile_roles_handlers())
Example #41
def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task
    #from ansible.playbook.task_include import TaskInclude

    assert type(ds) == list

    task_list = []
    for task in ds:
        if not isinstance(task, dict):
            raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds)

        #if 'include' in task:
        #    cur_basedir = None
        #    if isinstance(task, AnsibleBaseYAMLObject) and loader:
        #        pos_info = task.get_position_info()
        #        new_basedir = os.path.dirname(pos_info[0])
        #        cur_basedir = loader.get_basedir()
        #        loader.set_basedir(new_basedir)

        #    t = TaskInclude.load(
        #        task,
        #        block=block,
        #        role=role,
        #        task_include=task_include,
        #        use_handlers=use_handlers,
        #        loader=loader
        #    )

        #    if cur_basedir and loader:
        #        loader.set_basedir(cur_basedir)
        #else:
        if True:
            if use_handlers:
                t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
            else:
                t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)

        task_list.append(t)

    return task_list
Example #42
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task

    assert isinstance(ds, list)

    task_list = []
    for task in ds:
        assert isinstance(task, dict)

        if 'block' in task:
            t = Block.load(
                task,
                play=play,
                parent_block=block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
        else:
            if use_handlers:
                t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader, play=play)
            else:
                t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)

        if isinstance(t, list):
            task_list.extend(t)
        else:
            task_list.append(t)

    return task_list
Example #43
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task

    if not isinstance(ds, list):
        raise AnsibleParserError('task has bad type: "%s". Expected "list"' % type(ds).__name__, obj=ds)

    task_list = []
    for task in ds:
        if not isinstance(task, dict):
            raise AnsibleParserError('task/handler has bad type: "%s". Expected "dict"' % type(task).__name__, obj=task)

        if 'block' in task:
            t = Block.load(
                task,
                play=play,
                parent_block=block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
        else:
            if use_handlers:
                t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
            else:
                t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)

        task_list.append(t)

    return task_list
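
A minimal usage sketch for the loader above (play, variable_manager, and loader are assumed to already exist; the datastructure mirrors what YAML parsing would produce):

ds = [
    {'name': 'say hello', 'debug': {'msg': 'hello'}},
    {'block': [{'ping': ''}]},
]
tasks = load_list_of_tasks(ds, play, variable_manager=variable_manager, loader=loader)
# tasks[0] is a Task; tasks[1] is a Block wrapping the nested entry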
Example #44
var_manager = VariableManager()

debug("loading inventory")
inventory = Inventory(host_list='/tmp/med_inventory', loader=loader, variable_manager=var_manager)
hosts = inventory.get_hosts()[:]
debug("done loading inventory")

play_context = PlayContext()
play_context.connection = 'local'

for i in range(NUM_TASKS):
   #for j in range(NUM_HOSTS):
   for h in hosts:
      debug("queuing %s %d" % (h, i))
      #h = Host(name="host%06d" % j)
      t = Task().load(dict(name="task %d" % (i,), debug="msg='hello from %s, %d'" % (h,i)))
      #t = Task().load(dict(name="task %d" % (i,), ping=""))
      #task_vars = var_manager.get_vars(loader=loader, host=h, task=t)
      task_vars = dict()
      new_t = t.copy()
      new_t.post_validate(task_vars)
      send_data((h, t, task_vars, play_context))
      debug("done queuing %s %d" % (h, i))
      _process_pending_results()
   debug("waiting for the results to drain...")
   _wait_on_pending_results()

res_q.close()
res_p.terminate()

for (w_p, main_q, wrkr_q) in workers:
Example #45
 def test_load_task_simple(self):
     t = Task.load(basic_shell_task)
     assert t is not None
     self.assertEqual(t.name, basic_shell_task["name"])
     self.assertEqual(t.action, "command")
     self.assertEqual(t.args, dict(_raw_params="echo hi", _uses_shell=True))
Example #46
 def test_load_task_kv_form(self):
     t = Task.load(kv_shell_task)
     self.assertEqual(t.action, "command")
     self.assertEqual(t.args, dict(_raw_params="echo hi", _uses_shell=True))
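
Both tests above depend on fixtures defined elsewhere in the test module; definitions consistent with the assertions would look like this (illustrative):

basic_shell_task = dict(
    name='Test Task',
    shell='echo hi',
)

kv_shell_task = dict(
    action='shell echo hi',
)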
Example #47
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []
        self._variable_manager = variable_manager

        self._task_uuid_cache = dict()

        # Default options to gather
        gather_subset = play_context.gather_subset
        gather_timeout = play_context.gather_timeout
        fact_path = play_context.fact_path

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset
        # Retrieve timeout for gather
        if self._play.gather_timeout is not None:
            gather_timeout = self._play.gather_timeout
        # Retrieve fact_path
        if self._play.fact_path is not None:
            fact_path = self._play.fact_path

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.name = 'Gathering Facts'
        setup_task.tags = ['always']
        setup_task.args = {
            'gather_subset': gather_subset,
        }
        if gather_timeout:
            setup_task.args['gather_timeout'] = gather_timeout
        if fact_path:
            setup_task.args['fact_path'] = fact_path
        setup_task.set_loader(self._play._loader)
        # short circuit fact gathering if the entire playbook is conditional
        if self._play._included_conditional is not None:
            setup_task.when = self._play._included_conditional[:]
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)
        self.cache_block_tasks(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self.cache_block_tasks(new_block)
                self._blocks.append(new_block)

        for handler_block in self._play.handlers:
            self.cache_block_tasks(handler_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None
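A hedged usage sketch of the iterator built above: assuming the play gathers facts and the setup block survives tag filtering, the first task handed out for any host is the synthetic 'Gathering Facts' task prepended in __init__.

itr = PlayIterator(inventory, play, play_context, variable_manager, all_vars)
first_host = inventory.get_hosts(play.hosts)[0]
# peek=True inspects the next task without advancing this host's state
state, task = itr.get_next_task_for_host(first_host, peek=True)
assert task.action == 'setup' and task.name == 'Gathering Facts'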
Example No. 48
taskData = []
conditionalData = []

print "\nParsed tasks....."
# Extract data from compiled playbook
for playbook_path in pbex._playbooks:
  pb = Playbook.load(playbook_path, variable_manager=pbex._variable_manager, loader=pbex._loader)
  plays = pb.get_plays()
  for play in plays:
    #print play.get_name()
    tasklist = play.get_tasks() # play.get_tasks() returns a merged list of task blocks (pre, post, etc)
    for taskBlocks in tasklist:
        for task in taskBlocks:
          hashedTask = hash(task)
          changed = Task._get_parent_attribute(task, "changed_when")
          taskData.append(hashedTask)
          if changed is not None:
              conditionalData.append(hashedTask)
          print task, hashedTask, changed


# Build graph
graph = {}

def buildGraph(listData, nodeIndex=0):

    if nodeIndex+1 >= len(listData):  # >= also guards the empty-list case
        return

    buildGraph(listData, nodeIndex+1)
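As written, buildGraph only recurses and never touches graph; a sketch of the adjacency-map population it presumably intends (linking each task hash to its successor; the exact intent is not shown in the snippet) might be:

def buildGraph(listData, nodeIndex=0):
    # hypothetical completion: record an edge from each task hash to the next
    if nodeIndex + 1 >= len(listData):
        return
    graph.setdefault(listData[nodeIndex], []).append(listData[nodeIndex + 1])
    buildGraph(listData, nodeIndex + 1)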
Example No. 49
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result     = True
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                self._display.debug("getting the remaining hosts for this loop")
                hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                self._display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest   = False
                choose_step = True

                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True


                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            self._display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator)
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        self._display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        self._display.debug("done getting variables")

                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = text_type(templar.template(task.name, fail_on_undefined=False))
                                display.debug("done templating")
                            except:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)

                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                # go to next host/task group
                if skip_rest:
                    continue

                self._display.debug("done queuing things up, now waiting for results queue to drain")
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                if not work_to_do and len(iterator.get_failed_hosts()) > 0:
                    self._display.debug("out of hosts to run on")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    result = False
                    break

                try:
                    included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
                except AnsibleError as e:
                    return False

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_blocks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)
                        except AnsibleError as e:
                            for host in included_file._hosts:
                                iterator.mark_host_failed(host)
                            self._display.warning(str(e))
                            continue

                        for new_block in new_blocks:
                            noop_block = Block(parent_block=task._block)
                            noop_block.block  = [noop_task for t in new_block.block]
                            noop_block.always = [noop_task for t in new_block.always]
                            noop_block.rescue = [noop_task for t in new_block.rescue]
                            for host in hosts_left:
                                if host in included_file._hosts:
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
                                    final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                    all_blocks[host].append(final_block)
                                else:
                                    all_blocks[host].append(noop_block)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                self._display.debug("results queue empty")
            except (IOError, EOFError) as e:
                self._display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return False

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
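The equal-length noop lists built above are the heart of the lock-step guarantee: excluded hosts receive one noop per real task, so every host's block list stays the same length. A minimal standalone illustration:

# standalone illustration of the equal-length noop padding used above
new_block_tasks = ['taskA', 'taskB', 'taskC']    # tasks loaded from an include
noop_tasks = ['noop' for t in new_block_tasks]   # same length, all noops
assert len(noop_tasks) == len(new_block_tasks)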
Example No. 50
    def run(self, iterator, connection_info):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        result = True

        # iterate over each task, while there is one left to run
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                debug("getting the remaining hosts for this loop")
                self._tqm._failed_hosts = iterator.get_failed_hosts()
                hosts_left = self.get_hosts_remaining(iterator._play)
                debug("done getting the remaining hosts for this loop")
                if len(hosts_left) == 0:
                    debug("out of hosts to run on")
                    self._callback.playbook_on_no_hosts_remaining()
                    result = False
                    break

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                        if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
                            run_once = True
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        pass

                    debug("getting variables")
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    debug("done getting variables")

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run():
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            debug("'%s' skipped because role has already run" % task)
                            continue

                    if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup':
                        debug("'%s' failed tag evaluation" % task)
                        continue

                    if task.action == 'meta':
                        # meta tasks store their args in the _raw_params field of args,
                        # since they do not use k=v pairs, so get that
                        meta_action = task.args.get('_raw_params')
                        if meta_action == 'noop':
                            # FIXME: issue a callback for the noop here?
                            print("%s => NOOP" % host)
                            continue
                        elif meta_action == 'flush_handlers':
                            self.run_handlers(iterator, connection_info)
                        else:
                            raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
                    else:
                        if not callback_sent:
                            self._callback.playbook_on_task_start(task.get_name(), False)
                            callback_sent = True

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, connection_info)

                    results = self._process_pending_results(iterator)
                    host_results.extend(results)

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                debug("done queuing things up, now waiting for results queue to drain")
                results = self._wait_on_pending_results(iterator)
                host_results.extend(results)

                # FIXME: this needs to be somewhere else
                class IncludedFile:
                    def __init__(self, filename, args, task):
                        self._filename = filename
                        self._args     = args
                        self._task     = task
                        self._hosts    = []
                    def add_host(self, host):
                        if host not in self._hosts:
                            self._hosts.append(host)
                    def __eq__(self, other):
                        return other._filename == self._filename and other._args == self._args
                    def __repr__(self):
                        return "%s (%s): %s" % (self._filename, self._args, self._hosts)

                included_files = []
                for res in host_results:
                    if res._task.action == 'include':
                        if res._task.loop:
                            include_results = res._result['results']
                        else:
                            include_results = [ res._result ]

                        for include_result in include_results:
                            original_task = iterator.get_original_task(res._host, res._task)
                            if original_task and original_task._role:
                                include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', res._task.args.get('_raw_params'))
                            else:
                                include_file = self._loader.path_dwim(res._task.args.get('_raw_params'))

                            include_variables = include_result.get('include_variables', dict())
                            if 'item' in include_result:
                                include_variables['item'] = include_result['item']

                            inc_file = IncludedFile(include_file, include_variables, original_task)

                            try:
                                pos = included_files.index(inc_file)
                                inc_file = included_files[pos]
                            except ValueError:
                                included_files.append(inc_file)

                            inc_file.add_host(res._host)

                if len(included_files) > 0:
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    all_tasks = dict((host, []) for host in hosts_left)
                    for included_file in included_files:
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        new_tasks = self._load_included_file(included_file)
                        noop_tasks = [noop_task for t in new_tasks]
                        for host in hosts_left:
                            if host in included_file._hosts:
                                all_tasks[host].extend(new_tasks)
                            else:
                                all_tasks[host].extend(noop_tasks)

                    for host in hosts_left:
                        iterator.add_tasks(host, all_tasks[host])

                debug("results queue empty")
            except (IOError, EOFError) as e:
                debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return 1
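The included_files dedup above relies on list.index() calling IncludedFile.__eq__, so repeated includes of the same file with the same args collapse into one entry whose _hosts list accumulates. A self-contained sketch of that pattern:

# self-contained sketch of the list.index()/__eq__ dedup used above
class IncludedFile(object):
    def __init__(self, filename, args):
        self._filename, self._args, self._hosts = filename, args, []
    def add_host(self, host):
        if host not in self._hosts:
            self._hosts.append(host)
    def __eq__(self, other):
        return other._filename == self._filename and other._args == self._args

included_files = []
for host in ('host1', 'host2'):
    inc = IncludedFile('include_test.yml', {})
    try:
        inc = included_files[included_files.index(inc)]  # reuse the existing entry
    except ValueError:
        included_files.append(inc)
    inc.add_host(host)

assert len(included_files) == 1
assert included_files[0]._hosts == ['host1', 'host2']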
Example No. 51
    def _get_next_task_lockstep(self, hosts, iterator):
        """
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        """

        noop_task = Task()
        noop_task.action = "meta"
        noop_task.args["_raw_params"] = "noop"
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            if not iterator.is_failed(host):
                host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks = 0
        num_rescue = 0
        num_always = 0

        display.debug("counting tasks in each state of execution")
        host_tasks_to_run = [
            (host, state_task) for host, state_task in iteritems(host_tasks) if state_task and state_task[1]
        ]

        if host_tasks_to_run:
            lowest_cur_block = min(
                (s.cur_block for h, (s, t) in host_tasks_to_run if s.run_state != PlayIterator.ITERATING_COMPLETE)
            )
        else:
            # empty host_tasks_to_run will just run till the end of the function
            # without ever touching lowest_cur_block
            lowest_cur_block = None

        for (k, v) in host_tasks_to_run:
            (s, t) = v

            if s.cur_block > lowest_cur_block:
                # Not the current block, ignore it
                continue

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution")

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            """
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            """
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks.get(host.name)
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]
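Putting the pieces above together: the winning run_state is chosen by priority (setup, then tasks, rescue, always), and every host not in that state gets the shared noop task. A self-contained mock of that priority scheme, with plain strings standing in for hosts, states, and tasks:

# mock of the state-priority selection above (data shapes are assumptions)
states = {'h1': 'ITERATING_TASKS', 'h2': 'ITERATING_SETUP', 'h3': 'ITERATING_TASKS'}
for winner in ('ITERATING_SETUP', 'ITERATING_TASKS', 'ITERATING_RESCUE', 'ITERATING_ALWAYS'):
    if winner in states.values():
        break
print([(h, 'real task' if s == winner else 'noop') for h, s in sorted(states.items())])
# -> [('h1', 'noop'), ('h2', 'real task'), ('h3', 'noop')]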
Example No. 52
    def run(self, iterator, play_context):
        '''
        The linear strategy is simple - get the next task and queue
        it for all hosts, then wait for the queue to drain before
        moving on to the next task
        '''

        # iterate over each task, while there is one left to run
        result = self._tqm.RUN_OK
        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            try:
                display.debug("getting the remaining hosts for this loop")
                hosts_left = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                display.debug("done getting the remaining hosts for this loop")

                # queue up this task for each host in the inventory
                callback_sent = False
                work_to_do = False

                host_results = []
                host_tasks = self._get_next_task_lockstep(hosts_left, iterator)

                # skip control
                skip_rest   = False
                choose_step = True

                # flag set if task is set to any_errors_fatal
                any_errors_fatal = False

                results = []
                for (host, task) in host_tasks:
                    if not task:
                        continue

                    if self._tqm._terminated:
                        break

                    run_once = False
                    work_to_do = True

                    # test to see if the task across all hosts points to an action plugin which
                    # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
                    # will only send this task to the first host in the list.

                    try:
                        action = action_loader.get(task.action, class_only=True)
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        action = None

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            display.debug("'%s' skipped because role has already run" % task)
                            continue

                    if task.action == 'meta':
                        # for the linear strategy, we run meta tasks just once and for
                        # all hosts currently being iterated over rather than one host
                        results.extend(self._execute_meta(task, play_context, iterator))
                        if task.args.get('_raw_params', None) != 'noop':
                            run_once = True
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if self._step and choose_step:
                            if self._take_step(task):
                                choose_step = False
                            else:
                                skip_rest = True
                                break

                        display.debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        self.add_tqm_variables(task_vars, play=iterator._play)
                        templar = Templar(loader=self._loader, variables=task_vars)
                        display.debug("done getting variables")

                        run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)

                        if (task.any_errors_fatal or run_once) and not task.ignore_errors:
                            any_errors_fatal = True

                        if not callback_sent:
                            display.debug("sending task start callback, copying the task so we can template it temporarily")
                            saved_name = task.name
                            display.debug("done copying, going to template now")
                            try:
                                task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                                display.debug("done templating")
                            except:
                                # just ignore any errors during task name templating,
                                # we don't care if it just shows the raw name
                                display.debug("templating failed for some reason")
                                pass
                            display.debug("here goes the callback...")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            task.name = saved_name
                            callback_sent = True
                            display.debug("sending task start callback")

                        self._blocked_hosts[host.get_name()] = True
                        self._queue_task(host, task, task_vars, play_context)
                        del task_vars

                    # if we're bypassing the host loop, break out now
                    if run_once:
                        break

                    results += self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1)))

                # go to next host/task group
                if skip_rest:
                    continue

                display.debug("done queuing things up, now waiting for results queue to drain")
                if self._pending_results > 0:
                    results += self._wait_on_pending_results(iterator)
                host_results.extend(results)

                all_role_blocks = []
                for hr in results:
                    # handle include_role
                    if hr._task.action == 'include_role':
                        loop_var = None
                        if hr._task.loop:
                            loop_var = 'item'
                            if hr._task.loop_control:
                                loop_var = hr._task.loop_control.loop_var or 'item'
                            include_results = hr._result.get('results', [])
                        else:
                            include_results = [ hr._result ]

                        for include_result in include_results:
                            if ('skipped' in include_result and include_result['skipped']) or ('failed' in include_result and include_result['failed']):
                                continue

                            display.debug("generating all_blocks data for role")
                            new_ir = hr._task.copy()
                            new_ir.vars.update(include_result.get('include_variables', dict()))
                            if loop_var and loop_var in include_result:
                                new_ir.vars[loop_var] = include_result[loop_var]

                            all_role_blocks.extend(new_ir.get_block_list(play=iterator._play, variable_manager=self._variable_manager, loader=self._loader))

                if len(all_role_blocks) > 0:
                    for host in hosts_left:
                        iterator.add_tasks(host, all_role_blocks)

                try:
                    included_files = IncludedFile.process_include_results(
                        host_results,
                        self._tqm,
                        iterator=iterator,
                        inventory=self._inventory,
                        loader=self._loader,
                        variable_manager=self._variable_manager
                    )
                except AnsibleError as e:
                    # this is a fatal error, so we abort here regardless of block state
                    return self._tqm.RUN_ERROR

                include_failure = False
                if len(included_files) > 0:
                    display.debug("we have included files to process")
                    noop_task = Task()
                    noop_task.action = 'meta'
                    noop_task.args['_raw_params'] = 'noop'
                    noop_task.set_loader(iterator._play._loader)

                    display.debug("generating all_blocks data")
                    all_blocks = dict((host, []) for host in hosts_left)
                    display.debug("done generating all_blocks data")
                    for included_file in included_files:
                        display.debug("processing included file: %s" % included_file._filename)
                        # included hosts get the task list while those excluded get an equal-length
                        # list of noop tasks, to make sure that they continue running in lock-step
                        try:
                            new_blocks = self._load_included_file(included_file, iterator=iterator)

                            display.debug("iterating over new_blocks loaded from include file")
                            for new_block in new_blocks:
                                task_vars = self._variable_manager.get_vars(
                                    loader=self._loader,
                                    play=iterator._play,
                                    task=included_file._task,
                                )
                                display.debug("filtering new block on tags")
                                final_block = new_block.filter_tagged_tasks(play_context, task_vars)
                                display.debug("done filtering new block on tags")

                                noop_block = Block(parent_block=task._parent)
                                noop_block.block  = [noop_task for t in new_block.block]
                                noop_block.always = [noop_task for t in new_block.always]
                                noop_block.rescue = [noop_task for t in new_block.rescue]

                                for host in hosts_left:
                                    if host in included_file._hosts:
                                        all_blocks[host].append(final_block)
                                    else:
                                        all_blocks[host].append(noop_block)
                            display.debug("done iterating over new_blocks loaded from include file")

                        except AnsibleError as e:
                            for host in included_file._hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                            display.error(to_text(e), wrap_text=False)
                            include_failure = True
                            continue

                    # finally go through all of the hosts and append the
                    # accumulated blocks to their list of tasks
                    display.debug("extending task lists for all hosts with included blocks")

                    for host in hosts_left:
                        iterator.add_tasks(host, all_blocks[host])

                    display.debug("done extending task lists")
                    display.debug("done processing included files")

                display.debug("results queue empty")

                display.debug("checking for any_errors_fatal")
                failed_hosts = []
                unreachable_hosts = []
                for res in results:
                    if res.is_failed():
                        failed_hosts.append(res._host.name)
                    elif res.is_unreachable():
                        unreachable_hosts.append(res._host.name)

                # if any_errors_fatal and we had an error, mark all hosts as failed
                if any_errors_fatal and (len(failed_hosts) > 0 or len(unreachable_hosts) > 0):
                    for host in hosts_left:
                        (s, _) = iterator.get_next_task_for_host(host, peek=True)
                        if s.run_state != iterator.ITERATING_RESCUE or \
                           s.run_state == iterator.ITERATING_RESCUE and s.fail_state & iterator.FAILED_RESCUE != 0:
                            self._tqm._failed_hosts[host.name] = True
                            result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug("done checking for any_errors_fatal")

                display.debug("checking for max_fail_percentage")
                if iterator._play.max_fail_percentage is not None and len(results) > 0:
                    percentage = iterator._play.max_fail_percentage / 100.0

                    if (len(self._tqm._failed_hosts) / len(results)) > percentage:
                        for host in hosts_left:
                            # don't double-mark hosts, or the iterator will potentially
                            # fail them out of the rescue/always states
                            if host.name not in failed_hosts:
                                self._tqm._failed_hosts[host.name] = True
                                iterator.mark_host_failed(host)
                        self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                        result |= self._tqm.RUN_FAILED_BREAK_PLAY
                display.debug("done checking for max_fail_percentage")

                display.debug("checking to see if all hosts have failed and the running result is not ok")
                if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
                    display.debug("^ not ok, so returning result now")
                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    return result
                display.debug("done checking to see if all hosts have failed")

            except (IOError, EOFError) as e:
                display.debug("got IOError/EOFError in task loop: %s" % e)
                # most likely an abort, return failed
                return self._tqm.RUN_UNKNOWN_ERROR

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered

        return super(StrategyModule, self).run(iterator, play_context, result)
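The max_fail_percentage check above is a straight ratio test against the failed-host count; a worked sketch with assumed numbers:

# worked example of the max_fail_percentage test above (numbers assumed)
max_fail_percentage = 30
failed_hosts_count = 4      # stands in for len(self._tqm._failed_hosts)
results_count = 10          # stands in for len(results) this pass

if (failed_hosts_count / float(results_count)) > (max_fail_percentage / 100.0):
    print('0.40 > 0.30: mark remaining hosts failed and break the play')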
Example No. 53
 def test_load_task_simple(self):
     t = Task.load(basic_shell_task)
     assert t is not None
     self.assertEqual(t.name, basic_shell_task['name'])
     self.assertEqual(t.action, 'command')
     self.assertEqual(t.args, dict(_raw_params='echo hi', _uses_shell=True))
Example No. 54
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []

        self._task_uuid_cache = dict()

        # Default options to gather
        gather_subset = C.DEFAULT_GATHER_SUBSET
        gather_timeout = C.DEFAULT_GATHER_TIMEOUT

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset
        # Retrieve timeout for gather
        if self._play.gather_timeout is not None:
            gather_timeout = self._play.gather_timeout

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = "setup"
        setup_task.name = "Gathering Facts"
        setup_task.tags = ["always"]
        setup_task.args = {"gather_subset": gather_subset}
        if gather_timeout:
            setup_task.args["gather_timeout"] = gather_timeout
        setup_task.set_loader(self._play._loader)
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)
        self.cache_block_tasks(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self.cache_block_tasks(new_block)
                self._blocks.append(new_block)

        for handler_block in self._play.handlers:
            self.cache_block_tasks(handler_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if the host's name is in the variable manager's fact cache, then set
            # its _gathered_facts flag to true for smart gathering tests later
            if host.name in variable_manager._fact_cache and variable_manager._fact_cache[host.name].get("module_setup", False):
                host._gathered_facts = True
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if (
                        task.name == play_context.start_at_task
                        or fnmatch.fnmatch(task.name, play_context.start_at_task)
                        or task.get_name() == play_context.start_at_task
                        or fnmatch.fnmatch(task.get_name(), play_context.start_at_task)
                    ):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None
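The smart-gathering gate above (indexing the fact cache per host) reduces to a membership-plus-flag test; a standalone sketch, where the cache shape (host name mapped to a facts dict with a 'module_setup' marker) is an assumption:

fact_cache = {'testhost1': {'module_setup': True}}

for name in ('testhost1', 'testhost2'):
    gathered = name in fact_cache and fact_cache[name].get('module_setup', False)
    print(name, 'skip setup' if gathered else 'run setup')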
Example No. 55
 def test_load_task_kv_form(self):
     t = Task.load(kv_shell_task)
     print("task action is %s" % t.action)
     self.assertEqual(t.action, 'command')
     self.assertEqual(t.args, dict(_raw_params='echo hi', _uses_shell=True))
Example No. 56
 def test_task_auto_name(self):
     assert 'name' not in kv_shell_task
     t = Task.load(kv_shell_task)
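The test above only asserts that loading a nameless task succeeds; for illustration, get_name() then falls back to a generated display name derived from the action (its exact format is an assumption here):

t = Task.load(kv_shell_task)
# with no explicit 'name', get_name() synthesizes one from the action
print(t.get_name())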
Example No. 57
    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        display.debug("building list of next tasks for hosts")
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
        display.debug("done building task lists")

        num_setups = 0
        num_tasks  = 0
        num_rescue = 0
        num_always = 0

        lowest_cur_block = len(iterator._blocks)

        display.debug("counting tasks in each state of execution")
        for (k, v) in iteritems(host_tasks):
            if v is None:
                continue

            (s, t) = v
            if t is None:
                continue

            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1
        display.debug("done counting tasks in each state of execution")

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            display.debug("starting to advance hosts")
            for host in hosts:
                host_state_task = host_tasks[host.name]
                if host_state_task is None:
                    continue
                (s, t) = host_state_task
                if t is None:
                    continue
                if s.run_state == cur_state and s.cur_block == cur_block:
                    new_t = iterator.get_next_task_for_host(host)
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            display.debug("done advancing hosts to next task")
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            display.debug("advancing hosts in ITERATING_SETUP")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            display.debug("advancing hosts in ITERATING_TASKS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            display.debug("advancing hosts in ITERATING_RESCUE")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            display.debug("advancing hosts in ITERATING_ALWAYS")
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        display.debug("all hosts are done, so returning None's for all hosts")
        return [(host, None) for host in hosts]
Example No. 58
    def get_next_task_for_host(self, host, peek=False, lock_step=True):
        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            return None
        else:
            while True:
                try:
                    cur_block = s._blocks[s.cur_block]
                except IndexError:
                    s.run_state = self.ITERATING_COMPLETE
                    break

                if s.run_state == self.ITERATING_SETUP:
                    s.run_state = self.ITERATING_TASKS
                    if (self._play._gather_facts == 'smart' and not host.gathered_facts) or boolean(self._play._gather_facts):
                        # mark the host as having gathered facts
                        host.set_gathered_facts(True)

                        task = Task()
                        task.action = 'setup'
                        task.set_loader(self._play._loader)

                elif s.run_state == self.ITERATING_TASKS:
                    # clear the pending setup flag, since we're past that and it didn't fail
                    if s.pending_setup:
                        s.pending_setup = False

                    if s.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
                        s.run_state = self.ITERATING_RESCUE
                    elif s.cur_regular_task >= len(cur_block.block):
                        s.run_state = self.ITERATING_ALWAYS
                    else:
                        task = cur_block.block[s.cur_regular_task]
                        s.cur_regular_task += 1
                        break
                elif s.run_state == self.ITERATING_RESCUE:
                    if s.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
                        s.run_state = self.ITERATING_ALWAYS
                    elif s.cur_rescue_task >= len(cur_block.rescue):
                        if len(cur_block.rescue) > 0:
                            s.fail_state = self.FAILED_NONE
                        s.run_state = self.ITERATING_ALWAYS
                    else:
                        task = cur_block.rescue[s.cur_rescue_task]
                        s.cur_rescue_task += 1
                        break
                elif s.run_state == self.ITERATING_ALWAYS:
                    if s.cur_always_task >= len(cur_block.always):
                        if s.fail_state != self.FAILED_NONE:
                            s.run_state = self.ITERATING_COMPLETE
                            break
                        else:
                            s.cur_block += 1
                            s.cur_regular_task = 0
                            s.cur_rescue_task  = 0
                            s.cur_always_task  = 0
                            s.run_state = self.ITERATING_TASKS
                    else:
                        task = cur_block.always[s.cur_always_task]
                        s.cur_always_task += 1
                        break

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
                s.cur_role._completed = True

            s.cur_role = task._role

        if not peek:
            self._host_states[host.name] = s

        return (s, task)
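The fail_state tests in the state machine above are plain bitmask checks; a self-contained illustration, where the flag values are assumptions mirroring PlayIterator's FAILED_* constants:

# assumed flag values mirroring PlayIterator's FAILED_* constants
FAILED_NONE, FAILED_SETUP, FAILED_TASKS, FAILED_RESCUE = 0, 1, 2, 4

fail_state = FAILED_TASKS | FAILED_RESCUE
assert fail_state & FAILED_TASKS == FAILED_TASKS     # tasks failed: enter rescue
assert fail_state & FAILED_RESCUE == FAILED_RESCUE   # rescue failed too: run always, then complete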
Example No. 59
    def _get_next_task_lockstep(self, hosts, iterator):
        '''
        Returns a list of (host, task) tuples, where the task may
        be a noop task to keep the iterator in lock step across
        all hosts.
        '''

        noop_task = Task()
        noop_task.action = 'meta'
        noop_task.args['_raw_params'] = 'noop'
        noop_task.set_loader(iterator._play._loader)

        host_tasks = {}
        for host in hosts:
            host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)

        num_setups = 0
        num_tasks  = 0
        num_rescue = 0
        num_always = 0

        lowest_cur_block = len(iterator._blocks)

        for (k, v) in iteritems(host_tasks):
            (s, t) = v
            if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                lowest_cur_block = s.cur_block

            if s.run_state == PlayIterator.ITERATING_SETUP:
                num_setups += 1
            elif s.run_state == PlayIterator.ITERATING_TASKS:
                num_tasks += 1
            elif s.run_state == PlayIterator.ITERATING_RESCUE:
                num_rescue += 1
            elif s.run_state == PlayIterator.ITERATING_ALWAYS:
                num_always += 1

        def _advance_selected_hosts(hosts, cur_block, cur_state):
            '''
            This helper returns the task for all hosts in the requested
            state, otherwise they get a noop dummy task. This also advances
            the state of the host, since the given states are determined
            while using peek=True.
            '''
            # we return the values in the order they were originally
            # specified in the given hosts array
            rvals = []
            for host in hosts:
                (s, t) = host_tasks[host.name]
                if s.run_state == cur_state and s.cur_block == cur_block:
                    new_t = iterator.get_next_task_for_host(host)
                    #if new_t != t:
                    #    raise AnsibleError("iterator error, wtf?")
                    rvals.append((host, t))
                else:
                    rvals.append((host, noop_task))
            return rvals

        # if any hosts are in ITERATING_SETUP, return the setup task
        # while all other hosts get a noop
        if num_setups:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)

        # if any hosts are in ITERATING_TASKS, return the next normal
        # task for these hosts, while all other hosts get a noop
        if num_tasks:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)

        # if any hosts are in ITERATING_RESCUE, return the next rescue
        # task for these hosts, while all other hosts get a noop
        if num_rescue:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)

        # if any hosts are in ITERATING_ALWAYS, return the next always
        # task for these hosts, while all other hosts get a noop
        if num_always:
            return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)

        # at this point, everything must be ITERATING_COMPLETE, so we
        # return None for all hosts in the list
        return [(host, None) for host in hosts]
Example No. 60
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
    '''
    Given a list of task datastructures (parsed from YAML),
    return a list of Task() or TaskInclude() objects.
    '''

    # we import here to prevent a circular dependency with imports
    from ansible.playbook.block import Block
    from ansible.playbook.handler import Handler
    from ansible.playbook.task import Task
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.role_include import IncludeRole
    from ansible.playbook.handler_task_include import HandlerTaskInclude
    from ansible.template import Templar

    assert isinstance(ds, list)

    task_list = []
    for task_ds in ds:
        assert isinstance(task_ds, dict)

        if 'block' in task_ds:
            t = Block.load(
                task_ds,
                play=play,
                parent_block=block,
                role=role,
                task_include=task_include,
                use_handlers=use_handlers,
                variable_manager=variable_manager,
                loader=loader,
            )
            task_list.append(t)
        else:
            if 'include' in task_ds:
                if use_handlers:
                    include_class = HandlerTaskInclude
                else:
                    include_class = TaskInclude

                t = include_class.load(
                    task_ds,
                    block=block,
                    role=role,
                    task_include=None,
                    variable_manager=variable_manager,
                    loader=loader
                )

                all_vars = variable_manager.get_vars(loader=loader, play=play, task=t)
                templar = Templar(loader=loader, variables=all_vars)

                # check to see if this include is dynamic or static:
                # 1. the user has set the 'static' option to false or true
                # 2. one of the appropriate config options was set
                if t.static is not None:
                    is_static = t.static
                else:
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                                (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                                (not templar._contains_vars(t.args['_raw_params']) and t.all_parents_static() and not t.loop)
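                # with no explicit 'static' setting, the include is resolved
                # statically only when the config forces it or when nothing can
                # change at run time (e.g. 'include: "{{ item }}.yml"' or a
                # 'with_*' loop keeps it dynamic)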

                if is_static:
                    if t.loop is not None:
                        raise AnsibleParserError("You cannot use 'static' on an include with a loop", obj=task_ds)

                    # we set a flag to indicate this include was static
                    t.statically_loaded = True

                    # handle relative includes by walking up the list of parent include
                    # tasks and checking the relative result to see if it exists
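                    # e.g. if a.yml includes sub/b.yml and b.yml includes c.yml,
                    # walking up yields cumulative_path 'sub', so c.yml is looked
                    # up relative to the directory of the file that included it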
                    parent_include = block
                    cumulative_path = None

                    found = False
                    subdir = 'tasks'
                    if use_handlers:
                        subdir = 'handlers'
                    while parent_include is not None:
                        if not isinstance(parent_include, TaskInclude):
                            parent_include = parent_include._parent
                            continue
                        parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
                        if cumulative_path is None:
                            cumulative_path = parent_include_dir
                        elif not os.path.isabs(cumulative_path):
                            cumulative_path = os.path.join(parent_include_dir, cumulative_path)
                        include_target = templar.template(t.args['_raw_params'])
                        if t._role:
                            new_basedir = os.path.join(t._role._role_path, subdir, cumulative_path)
                            include_file = loader.path_dwim_relative(new_basedir, subdir, include_target)
                        else:
                            include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)

                        if os.path.exists(include_file):
                            found = True
                            break
                        else:
                            parent_include = parent_include._parent

                    if not found:
                        try:
                            include_target = templar.template(t.args['_raw_params'])
                        except AnsibleUndefinedVariable:
                            raise AnsibleParserError(
                                      "Error when evaluating variable in include name: %s.\n\n" \
                                      "When using static includes, ensure that any variables used in their names are defined in vars/vars_files\n" \
                                      "or extra-vars passed in from the command line. Static includes cannot use variables from inventory\n" \
                                      "sources like group or host vars." % t.args['_raw_params'],
                                      obj=task_ds,
                                      suppress_extended_error=True,
                                  )
                        if t._role:
                            include_file = loader.path_dwim_relative(t._role._role_path, subdir, include_target)
                        else:
                            include_file = loader.path_dwim(include_target)

                    try:
                        data = loader.load_from_file(include_file)
                        if data is None:
                            # an empty include file contributes no tasks; skip it
                            # rather than discarding the task list built so far
                            continue
                        elif not isinstance(data, list):
                            raise AnsibleParserError("included task files must contain a list of tasks", obj=data)

                        # since we can't send callbacks here, we display a message directly in
                        # the same fashion used by the on_include callback. We also do it here,
                        # because the recursive nature of helper methods means we may be loading
                        # nested includes, and we want the include order printed correctly
                        display.vv("statically included: %s" % include_file)
                    except AnsibleFileNotFound:
                        if t.static or \
                           C.DEFAULT_TASK_INCLUDES_STATIC or \
                           (C.DEFAULT_HANDLER_INCLUDES_STATIC and use_handlers):
                            raise
                        display.deprecated(
                            "Included file '%s' not found, however since this include is not " \
                            "explicitly marked as 'static: yes', we will try and include it dynamically " \
                            "later. In the future, this will be an error unless 'static: no' is used " \
                            "on the include task. If you do not want missing includes to be considered " \
                            "dynamic, use 'static: yes' on the include or set the global ansible.cfg " \
                            "options to make all inclues static for tasks and/or handlers" % include_file,
                        )
                        task_list.append(t)
                        continue

                    included_blocks = load_list_of_blocks(
                        data,
                        play=play,
                        parent_block=None,
                        task_include=t.copy(),
                        role=role,
                        use_handlers=use_handlers,
                        loader=loader,
                        variable_manager=variable_manager,
                    )

                    # pop tags out of the include args, if they were specified there, and assign
                    # them to the include. If the include already had tags specified, we raise an
                    # error so that users know not to specify them both ways
                    tags = t.vars.pop('tags', [])
                    if isinstance(tags, string_types):
                        tags = tags.split(',')
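                    # e.g. tags passed as the string 'web,db' on the include line
                    # arrive via t.vars and are split into ['web', 'db'] here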

                    if len(tags) > 0:
                        if len(t.tags) > 0:
                            raise AnsibleParserError(
                                "Include tasks should not specify tags in more than one way (both via args and directly on the task). " \
                                "Mixing styles in which tags are specified is prohibited for whole import hierarchy, not only for single import statement",
                                obj=task_ds,
                                suppress_extended_error=True,
                            )
                        display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
                    else:
                        tags = t.tags[:]

                    # now we extend the tags on each of the included blocks
                    for b in included_blocks:
                        b.tags = list(set(b.tags).union(tags))

                    # FIXME: handlers shouldn't need this special handling, but do
                    #        right now because they don't iterate blocks correctly
                    if use_handlers:
                        for b in included_blocks:
                            task_list.extend(b.block)
                    else:
                        task_list.extend(included_blocks)
                else:
                    task_list.append(t)

            elif 'include_role' in task_ds:

                ir = IncludeRole.load(
                            task_ds,
                            block=block,
                            role=role,
                            task_include=None,
                            variable_manager=variable_manager,
                            loader=loader
                     )

                # check to see if this include_role is dynamic or static:
                #   1. the user has set the 'static' option to false or true
                #   2. one of the appropriate config options was set
                if ir.static is not None:
                    is_static = ir.static
                else:
                    display.debug('Determining if include_role is static')
                    all_vars = variable_manager.get_vars(loader=loader, play=play, task=ir)
                    templar = Templar(loader=loader, variables=all_vars)
                    needs_templating = False
                    for param in ir.args:
                        if templar._contains_vars(ir.args[param]):
                            if not templar.templatable(ir.args[param]):
                                needs_templating = True
                                break
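                    # if any include_role argument still contains variables that
                    # cannot be resolved at parse time, the role has to be
                    # included dynamically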
                    is_static = C.DEFAULT_TASK_INCLUDES_STATIC or \
                                (use_handlers and C.DEFAULT_HANDLER_INCLUDES_STATIC) or \
                                (not needs_templating and ir.all_parents_static() and not ir.loop)
                    display.debug('Determined that include_role is static: %s' % str(is_static))
                if is_static:
                    # use the compiled block list from the role include
                    task_list.extend(ir.get_block_list(variable_manager=variable_manager, loader=loader))
                else:
                    # keep the task object itself for later generation of the block list
                    task_list.append(ir)
            else:
                if use_handlers:
                    t = Handler.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
                else:
                    t = Task.load(task_ds, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)

                task_list.append(t)

    return task_list
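
For context, a minimal usage sketch of the helper above. The imports match the
Ansible 2.x layout this code comes from, but the play definition and file names
are invented for illustration, and VariableManager construction details vary
between releases.

from ansible.parsing.dataloader import DataLoader
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.play import Play
from ansible.vars import VariableManager

loader = DataLoader()
variable_manager = VariableManager()
play = Play.load({'hosts': 'all'}, variable_manager=variable_manager, loader=loader)

# task datastructures as parsed from YAML; the include file name is hypothetical
ds = [
    {'debug': 'msg=hello'},
    {'include': 'tasks/extra.yml'},
]
tasks = load_list_of_tasks(ds, play=play, variable_manager=variable_manager, loader=loader)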