def test_play_context_make_become_cmd(self):
    """Verify PlayContext.make_become_cmd() wraps a command correctly for
    every supported privilege-escalation method (sudo, su, pbrun, pfexec,
    doas, ksu, dzdo) and raises AnsibleError for an unknown method."""
    # Build a PlayContext from empty (default) CLI options.
    (options, args) = self._parser.parse_args([])
    play_context = PlayContext(options=options)

    default_cmd = "/bin/foo"
    default_exe = "/bin/bash"
    # Escalation executables/flags; fall back to the conventional binary
    # names when the configured defaults are unset.
    sudo_exe = C.DEFAULT_SUDO_EXE or 'sudo'
    sudo_flags = C.DEFAULT_SUDO_FLAGS
    su_exe = C.DEFAULT_SU_EXE or 'su'
    su_flags = C.DEFAULT_SU_FLAGS or ''  # NOTE(review): unused below — kept for symmetry
    pbrun_exe = 'pbrun'
    pbrun_flags = ''
    pfexec_exe = 'pfexec'
    pfexec_flags = ''
    doas_exe = 'doas'
    doas_flags = ' -n -u foo '
    ksu_exe = 'ksu'
    ksu_flags = ''
    dzdo_exe = 'dzdo'

    # With become disabled the command must pass through untouched.
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
    self.assertEqual(cmd, default_cmd)

    play_context.become = True
    play_context.become_user = '******'

    # sudo without a password: plain -u wrapping around a success-key echo.
    play_context.become_method = 'sudo'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    self.assertEqual(
        cmd, """%s %s -u %s %s -c 'echo %s; %s'""" %
        (sudo_exe, sudo_flags, play_context.become_user, default_exe,
         play_context.success_key, default_cmd))

    # sudo with a password: -p "<prompt>" is added and the non-interactive
    # flag (-n) is stripped from the flags so sudo can actually prompt.
    play_context.become_pass = '******'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
    self.assertEqual(
        cmd, """%s %s -p "%s" -u %s %s -c 'echo %s; %s'""" %
        (sudo_exe, sudo_flags.replace('-n', ''), play_context.prompt,
         play_context.become_user, default_exe, play_context.success_key,
         default_cmd))

    # su: inner command is re-quoted for the nested shell invocation.
    play_context.become_pass = None
    play_context.become_method = 'su'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    self.assertEqual(
        cmd, """%s %s -c '%s -c '"'"'echo %s; %s'"'"''""" %
        (su_exe, play_context.become_user, default_exe,
         play_context.success_key, default_cmd))

    play_context.become_method = 'pbrun'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    self.assertEqual(
        cmd, """%s %s -u %s 'echo %s; %s'""" %
        (pbrun_exe, pbrun_flags, play_context.become_user,
         play_context.success_key, default_cmd))

    play_context.become_method = 'pfexec'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    self.assertEqual(
        cmd, '''%s %s "'echo %s; %s'"''' %
        (pfexec_exe, pfexec_flags, play_context.success_key, default_cmd))

    # doas runs two invocations joined by &&: the success-key echo, then
    # the real command under `env ANSIBLE=true`.
    play_context.become_method = 'doas'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    self.assertEqual(
        cmd, """%s %s echo %s && %s %s env ANSIBLE=true %s""" %
        (doas_exe, doas_flags, play_context.success_key, doas_exe,
         doas_flags, default_cmd))

    play_context.become_method = 'ksu'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    self.assertEqual(
        cmd, """%s %s %s -e %s -c 'echo %s; %s'""" %
        (ksu_exe, play_context.become_user, ksu_flags, default_exe,
         play_context.success_key, default_cmd))

    # Unknown become methods must raise AnsibleError.
    play_context.become_method = 'bad'
    self.assertRaises(AnsibleError,
                      play_context.make_become_cmd,
                      cmd=default_cmd,
                      executable="/bin/bash")

    play_context.become_method = 'dzdo'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    self.assertEqual(
        cmd, """%s -u %s %s -c 'echo %s; %s'""" %
        (dzdo_exe, play_context.become_user, default_exe,
         play_context.success_key, default_cmd))

    # dzdo with a password shell-quotes the prompt via pipes.quote().
    # NOTE(review): pipes.quote is removed in Python 3.13; shlex.quote is
    # the py3 replacement — kept here, presumably for py2 compatibility.
    play_context.become_pass = '******'
    play_context.become_method = 'dzdo'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    self.assertEqual(
        cmd, """%s -p %s -u %s %s -c 'echo %s; %s'""" %
        (dzdo_exe, pipes.quote(play_context.prompt),
         play_context.become_user, default_exe, play_context.success_key,
         default_cmd))
def test_action_base__make_tmp_path(self):
    """Exercise ActionBase._make_tmp_path(): the success path and the
    failure modes (empty mkdir output, auth failure rc=5, ssh error
    rc=255 with and without high verbosity, and generic rc=1 errors)."""
    # create our fake task
    mock_task = MagicMock()

    def get_shell_opt(opt):
        # Minimal stand-in for the shell plugin's get_option(): only the
        # two options _make_tmp_path reads are answered.
        ret = None
        if opt == 'admin_users':
            ret = ['root', 'toor', 'Administrator']
        elif opt == 'remote_tmp':
            ret = '~/.ansible/tmp'
        return ret

    # create a mock connection, so we don't actually try and connect to things
    mock_connection = MagicMock()
    mock_connection.transport = 'ssh'
    mock_connection._shell.mkdtemp.return_value = 'mkdir command'
    mock_connection._shell.join_path.side_effect = os.path.join
    mock_connection._shell.get_option = get_shell_opt
    mock_connection._shell.HOMES_RE = re.compile(r'(\'|\")?(~|\$HOME)(.*)')

    # we're using a real play context here
    play_context = PlayContext()
    play_context.become = True
    play_context.become_user = '******'

    # our test class
    action_base = DerivedActionBase(
        task=mock_task,
        connection=mock_connection,
        play_context=play_context,
        loader=None,
        templar=None,
        shared_loader_obj=None,
    )

    action_base._low_level_execute_command = MagicMock()
    action_base._low_level_execute_command.return_value = dict(
        rc=0, stdout='/some/path')
    # Success: the returned remote path gains a trailing slash.
    self.assertEqual(action_base._make_tmp_path('root'), '/some/path/')

    # empty path fails
    action_base._low_level_execute_command.return_value = dict(rc=0,
                                                               stdout='')
    self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')

    # authentication failure
    action_base._low_level_execute_command.return_value = dict(rc=5,
                                                               stdout='')
    self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')

    # ssh error
    action_base._low_level_execute_command.return_value = dict(rc=255,
                                                               stdout='',
                                                               stderr='')
    self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
    # Same ssh error again with high verbosity (takes the verbose branch).
    play_context.verbosity = 5
    self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')

    # general error
    action_base._low_level_execute_command.return_value = dict(
        rc=1, stdout='some stuff here', stderr='')
    self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
    # Generic error with a recognizable stderr message still raises.
    action_base._low_level_execute_command.return_value = dict(
        rc=1, stdout='some stuff here', stderr='No space left on device')
    self.assertRaises(AnsibleError, action_base._make_tmp_path, 'root')
def run(self, play):
    '''
    Iterates over the roles/tasks in a play, using the given
    (or default) strategy for queueing tasks. The default is the
    linear strategy, which operates like classic Ansible by keeping
    all hosts in lock-step with a given task (meaning no hosts move
    on to the next task until all hosts are done with the current
    task).

    :param play: the (not yet post-validated) Play object to execute.
    :returns: the strategy's run() result (play return code).
    :raises AnsibleError: if the play names an unknown strategy.
    '''

    if not self._callbacks_loaded:
        self.load_callbacks()

    all_vars = self._variable_manager.get_vars(play=play)
    templar = Templar(loader=self._loader, variables=all_vars)
    warn_if_reserved(all_vars, templar.environment.globals.keys())

    # Work on a templated copy so the caller's play object stays pristine.
    new_play = play.copy()
    new_play.post_validate(templar)
    # Role handlers are prepended so play-level handlers come last.
    new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers

    self.hostvars = HostVars(
        inventory=self._inventory,
        variable_manager=self._variable_manager,
        loader=self._loader,
    )

    play_context = PlayContext(new_play, self.passwords,
                               self._connection_lockfile.fileno())
    # Give the stdout callback and any other callback plugin that cares a
    # chance to observe the play context.
    if (self._stdout_callback
            and hasattr(self._stdout_callback, 'set_play_context')):
        self._stdout_callback.set_play_context(play_context)

    for callback_plugin in self._callback_plugins:
        if hasattr(callback_plugin, 'set_play_context'):
            callback_plugin.set_play_context(play_context)

    self.send_callback('v2_playbook_on_play_start', new_play)

    # build the iterator
    iterator = PlayIterator(
        inventory=self._inventory,
        play=new_play,
        play_context=play_context,
        variable_manager=self._variable_manager,
        all_vars=all_vars,
        start_at_done=self._start_at_done,
    )

    # adjust to # of workers to configured forks or size of batch, whatever is lower
    self._initialize_processes(min(self._forks, iterator.batch_size))

    # load the specified strategy (or the default linear one)
    strategy = strategy_loader.get(new_play.strategy, self)
    if strategy is None:
        raise AnsibleError("Invalid play strategy specified: %s" %
                           new_play.strategy,
                           obj=play._ds)

    # Because the TQM may survive multiple play runs, we start by marking
    # any hosts as failed in the iterator here which may have been marked
    # as failed in previous runs. Then we clear the internal list of failed
    # hosts so we know what failed this round.
    for host_name in self._failed_hosts.keys():
        host = self._inventory.get_host(host_name)
        iterator.mark_host_failed(host)
    for host_name in self._unreachable_hosts.keys():
        iterator._play._removed_hosts.append(host_name)

    self.clear_failed_hosts()

    # during initialization, the PlayContext will clear the start_at_task
    # field to signal that a matching task was found, so check that here
    # and remember it so we don't try to skip tasks on future plays
    if context.CLIARGS.get(
            'start_at_task'
    ) is not None and play_context.start_at_task is None:
        self._start_at_done = True

    # and run the play using the strategy and cleanup on way out
    try:
        play_return = strategy.run(iterator, play_context)
    finally:
        # Always tear down strategy state and worker processes, even when
        # the strategy raised.
        strategy.cleanup()
        self._cleanup_processes()

    # now re-save the hosts that failed from the iterator to our internal list
    for host_name in iterator.get_failed_hosts():
        self._failed_hosts[host_name] = True

    return play_return
def main():
    """ Called to initiate the connect to the remote device

    Reads pickled variables and PlayContext data from stdin, then either
    forks a new persistent ConnectionProcess bound to a local domain
    socket or attaches to an existing one. Writes a JSON result document
    to stdout (or stderr on error) and exits with the corresponding rc.
    """
    rc = 0
    result = {}
    messages = list()
    socket_path = None

    # Need stdin as a byte stream
    if PY3:
        stdin = sys.stdin.buffer
    else:
        stdin = sys.stdin

    # Note: update the below log capture code after Display.display() is refactored.
    # stdout is captured into a buffer so stray prints become log messages.
    saved_stdout = sys.stdout
    sys.stdout = StringIO()

    try:
        # read the play context data via stdin, which means depickling it
        vars_data = read_stream(stdin)
        init_data = read_stream(stdin)

        if PY3:
            pc_data = cPickle.loads(init_data, encoding='bytes')
            variables = cPickle.loads(vars_data, encoding='bytes')
        else:
            pc_data = cPickle.loads(init_data)
            variables = cPickle.loads(vars_data)

        play_context = PlayContext()
        play_context.deserialize(pc_data)
        display.verbosity = play_context.verbosity

    except Exception as e:
        # Any failure reading/deserializing stdin is reported in the JSON
        # result rather than crashing with a traceback on stderr.
        rc = 1
        result.update({
            'error': to_text(e),
            'exception': traceback.format_exc()
        })

    if rc == 0:
        ssh = connection_loader.get('ssh', class_only=True)
        ansible_playbook_pid = sys.argv[1]
        task_uuid = sys.argv[2]
        # The control path uniquely identifies this host/user/connection
        # combination for the current playbook run.
        cp = ssh._create_control_path(play_context.remote_addr,
                                      play_context.port,
                                      play_context.remote_user,
                                      play_context.connection,
                                      ansible_playbook_pid)
        # create the persistent connection dir if need be and create the paths
        # which we will be using later
        tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
        makedirs_safe(tmp_path)

        socket_path = unfrackpath(cp % dict(directory=tmp_path))
        lock_path = unfrackpath("%s/.ansible_pc_lock_%s" %
                                os.path.split(socket_path))

    # Serialize socket creation across concurrent ansible invocations.
        with file_lock(lock_path):
            if not os.path.exists(socket_path):
                messages.append(
                    ('vvvv',
                     'local domain socket does not exist, starting it'))
                original_path = os.getcwd()
                r, w = os.pipe()
                pid = fork_process()

                if pid == 0:
                    # Child: run the connection process attached to the
                    # write end of the pipe; never returns past sys.exit.
                    try:
                        os.close(r)
                        wfd = os.fdopen(w, 'w')
                        process = ConnectionProcess(wfd, play_context,
                                                    socket_path,
                                                    original_path, task_uuid,
                                                    ansible_playbook_pid)
                        process.start(variables)
                    except Exception:
                        messages.append(('error', traceback.format_exc()))
                        rc = 1

                    if rc == 0:
                        process.run()
                    else:
                        process.shutdown()

                    sys.exit(rc)
                else:
                    # Parent: read the child's startup report from the pipe.
                    os.close(w)
                    rfd = os.fdopen(r, 'r')
                    data = json.loads(rfd.read(), cls=AnsibleJSONDecoder)
                    messages.extend(data.pop('messages'))
                    result.update(data)
            else:
                messages.append(
                    ('vvvv',
                     'found existing local domain socket, using it!'))
                conn = Connection(socket_path)
                try:
                    conn.set_options(var_options=variables)
                except ConnectionError as exc:
                    messages.append(('debug', to_text(exc)))
                    raise ConnectionError(
                        'Unable to decode JSON from response set_options. See the debug log for more information.'
                    )
                pc_data = to_text(init_data)
                try:
                    conn.update_play_context(pc_data)
                    conn.set_check_prompt(task_uuid)
                except Exception as exc:
                    # Only network_cli has update_play context and set_check_prompt, so missing this is
                    # not fatal e.g. netconf
                    if isinstance(exc, ConnectionError) and getattr(
                            exc, 'code', None) == -32601:
                        pass
                    else:
                        result.update({
                            'error': to_text(exc),
                            'exception': traceback.format_exc()
                        })

    if os.path.exists(socket_path):
        messages.extend(Connection(socket_path).pop_messages())
    # Flush whatever was printed during the run into the message log.
    messages.append(('vvvv', sys.stdout.getvalue()))
    result.update({'messages': messages, 'socket_path': socket_path})
    sys.stdout = saved_stdout
    if 'exception' in result:
        rc = 1
        sys.stderr.write(json.dumps(result, cls=AnsibleJSONEncoder))
    else:
        rc = 0
        sys.stdout.write(json.dumps(result, cls=AnsibleJSONEncoder))
    sys.exit(rc)
def test_network_cli_invalid_os(network_os):
    """network_cli must refuse to load for an unsupported network_os."""
    play_ctx = PlayContext()
    play_ctx.network_os = network_os

    with pytest.raises(AnsibleConnectionFailure):
        connection_loader.get("ansible.netcommon.network_cli", play_ctx,
                              "/dev/null")
def test_action_base__execute_module(self):
    """Exercise ActionBase._execute_module(): default and explicit args,
    remote tmp-path handling, become, an invalid shebang, and check-mode
    with and without module support for it."""
    # create our fake task
    mock_task = MagicMock()
    mock_task.action = 'copy'
    mock_task.args = dict(a=1, b=2, c=3)

    # create a mock connection, so we don't actually try and connect to things
    def build_module_command(env_string,
                             shebang,
                             cmd,
                             arg_path=None,
                             rm_tmp=None):
        # Simplified stand-in: just join the pieces that were provided.
        to_run = [env_string, cmd]
        if arg_path:
            to_run.append(arg_path)
        if rm_tmp:
            to_run.append(rm_tmp)
        return " ".join(to_run)

    mock_connection = MagicMock()
    mock_connection.build_module_command.side_effect = build_module_command
    mock_connection._shell.get_remote_filename.return_value = 'copy.py'
    mock_connection._shell.join_path.side_effect = os.path.join

    # we're using a real play context here
    play_context = PlayContext()

    # our test class
    action_base = DerivedActionBase(
        task=mock_task,
        connection=mock_connection,
        play_context=play_context,
        loader=None,
        templar=None,
        shared_loader_obj=None,
    )

    # fake a lot of methods as we test those elsewhere
    action_base._configure_module = MagicMock()
    action_base._supports_check_mode = MagicMock()
    action_base._late_needs_tmp_path = MagicMock()
    action_base._make_tmp_path = MagicMock()
    action_base._transfer_data = MagicMock()
    action_base._compute_environment_string = MagicMock()
    action_base._low_level_execute_command = MagicMock()
    action_base._fixup_perms2 = MagicMock()

    # _configure_module returns (module_style, shebang, data, path).
    action_base._configure_module.return_value = ('new', '#!/usr/bin/python',
                                                  'this is the module data',
                                                  'path')
    action_base._late_needs_tmp_path.return_value = False
    action_base._compute_environment_string.return_value = ''
    action_base._connection.has_pipelining = True
    action_base._low_level_execute_command.return_value = dict(
        stdout='{"rc": 0, "stdout": "ok"}')
    # Default args: stdout JSON is parsed into the result dict.
    self.assertEqual(
        action_base._execute_module(module_name=None, module_args=None),
        dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
    # Explicit module name/args/task_vars give the same parsed result.
    self.assertEqual(
        action_base._execute_module(module_name='foo',
                                    module_args=dict(z=9, y=8, x=7),
                                    task_vars=dict(a=1)),
        dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))

    # test with needing/removing a remote tmp path
    action_base._configure_module.return_value = ('old', '#!/usr/bin/python',
                                                  'this is the module data',
                                                  'path')
    action_base._late_needs_tmp_path.return_value = True
    action_base._make_tmp_path.return_value = '/the/tmp/path'
    self.assertEqual(
        action_base._execute_module(),
        dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))

    action_base._configure_module.return_value = ('non_native_want_json',
                                                  '#!/usr/bin/python',
                                                  'this is the module data',
                                                  'path')
    self.assertEqual(
        action_base._execute_module(),
        dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))

    # Becoming another user must not change the parsed result.
    play_context.become = True
    play_context.become_user = '******'
    self.assertEqual(
        action_base._execute_module(),
        dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))

    # test an invalid shebang return
    action_base._configure_module.return_value = ('new', '',
                                                  'this is the module data',
                                                  'path')
    action_base._late_needs_tmp_path.return_value = False
    self.assertRaises(AnsibleError, action_base._execute_module)

    # test with check mode enabled, once with support for check
    # mode and once with support disabled to raise an error
    play_context.check_mode = True
    action_base._configure_module.return_value = ('new', '#!/usr/bin/python',
                                                  'this is the module data',
                                                  'path')
    self.assertEqual(
        action_base._execute_module(),
        dict(_ansible_parsed=True, rc=0, stdout="ok", stdout_lines=['ok']))
    action_base._supports_check_mode = False
    self.assertRaises(AnsibleError, action_base._execute_module)
def setUp(self):
    """Give each test a fresh PlayContext carrying a canned sudo prompt,
    plus an empty in-memory input stream."""
    context = PlayContext()
    context.prompt = (
        '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
    )
    self.play_context = context
    self.in_stream = StringIO()
def test_network_cli__no_os(self):
    """Loading network_cli with no network_os set must fail."""
    play_ctx = PlayContext()
    play_ctx.network_os = None

    with self.assertRaises(AnsibleConnectionFailure):
        connection_loader.get('network_cli', play_ctx, '/dev/null')
def test_play_iterator_nested_blocks(self):
    """Deeply nested block/rescue/always structures must iterate in order
    and route a failed host through the rescue and always sections."""
    # NOTE(review): the YAML indentation below was reconstructed from a
    # whitespace-mangled source; the nesting levels of `rescue:` and
    # `always:` should be verified against the original fixture.
    fake_loader = DictDataLoader({
        "test_play.yml": """
        - hosts: all
          gather_facts: false
          tasks:
          - block:
            - block:
              - block:
                - block:
                  - block:
                    - debug: msg="this is the first task"
                    - ping:
                  rescue:
                  - block:
                    - block:
                      - block:
                        - block:
                          - debug: msg="this is the rescue task"
            always:
            - block:
              - block:
                - block:
                  - block:
                    - debug: msg="this is the always task"
        """,
    })

    mock_var_manager = MagicMock()
    mock_var_manager._fact_cache = dict()
    mock_var_manager.get_vars.return_value = dict()

    p = Playbook.load('test_play.yml',
                      loader=fake_loader,
                      variable_manager=mock_var_manager)

    # Ten fake hosts named host00..host09.
    hosts = []
    for i in range(0, 10):
        host = MagicMock()
        host.name = host.get_name.return_value = 'host%02d' % i
        hosts.append(host)

    inventory = MagicMock()
    inventory.get_hosts.return_value = hosts
    inventory.filter_hosts.return_value = hosts

    play_context = PlayContext(play=p._entries[0])

    itr = PlayIterator(
        inventory=inventory,
        play=p._entries[0],
        play_context=play_context,
        variable_manager=mock_var_manager,
        all_vars=dict(),
    )

    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    self.assertEqual(task.args, dict(_raw_params='flush_handlers'))

    # get the first task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg='this is the first task'))

    # fail the host
    itr.mark_host_failed(hosts[0])

    # get the resuce task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg='this is the rescue task'))

    # get the always task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg='this is the always task'))

    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    self.assertEqual(task.args, dict(_raw_params='flush_handlers'))

    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    self.assertEqual(task.args, dict(_raw_params='flush_handlers'))

    # end of iteration
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNone(task)
def test_action_base__fixup_perms2(self):
    """Walk ActionBase._fixup_perms2() through its fallback chain:
    Windows no-op, plain chmod when not becoming unprivileged, then (for
    unprivileged become) setfacl -> chmod +x -> chown -> macOS chmod +a ->
    Solaris chmod A+ -> common group chgrp -> world-readable tmpdir ->
    final catch-all error."""
    mock_task = MagicMock()
    mock_connection = MagicMock()
    play_context = PlayContext()
    action_base = DerivedActionBase(
        task=mock_task,
        connection=mock_connection,
        play_context=play_context,
        loader=None,
        templar=None,
        shared_loader_obj=None,
    )
    action_base._low_level_execute_command = MagicMock()
    remote_paths = ['/tmp/foo/bar.txt', '/tmp/baz.txt']
    remote_user = '******'
    # Used for skipping down to common group dir.
    CHMOD_ACL_FLAGS = ('+a', 'A+user:remoteuser2:r:allow')

    def runWithNoExpectation(execute=False):
        # Invoke the method under test with the current mock state.
        return action_base._fixup_perms2(remote_paths,
                                         remote_user=remote_user,
                                         execute=execute)

    def assertSuccess(execute=False):
        # On success _fixup_perms2 returns the original path list.
        self.assertEqual(runWithNoExpectation(execute), remote_paths)

    def assertThrowRegex(regex, execute=False):
        # NOTE(review): assertRaisesRegexp is the deprecated py2-era alias
        # of assertRaisesRegex — kept, presumably for py2 compatibility.
        self.assertRaisesRegexp(AnsibleError,
                                regex,
                                action_base._fixup_perms2,
                                remote_paths,
                                remote_user=remote_user,
                                execute=execute)

    def get_shell_option_for_arg(args_kv, default):
        '''A helper for get_shell_option. Returns a function that, if
        called with ``option`` that exists in args_kv, will return the
        value, else will return ``default`` for every other given arg'''
        def _helper(option, *args, **kwargs):
            return args_kv.get(option, default)

        return _helper

    action_base.get_become_option = MagicMock()
    action_base.get_become_option.return_value = 'remoteuser2'

    # Step 1: On windows, we just return remote_paths
    action_base._connection._shell._IS_WINDOWS = True
    assertSuccess(execute=False)
    assertSuccess(execute=True)

    # But if we're not on windows....we have more work to do.
    action_base._connection._shell._IS_WINDOWS = False

    # Step 2: We're /not/ becoming an unprivileged user
    action_base._remote_chmod = MagicMock()
    action_base._is_become_unprivileged = MagicMock()
    action_base._is_become_unprivileged.return_value = False
    # Two subcases:
    # - _remote_chmod rc is 0
    # - _remote-chmod rc is not 0, something failed
    action_base._remote_chmod.return_value = {
        'rc': 0,
        'stdout': 'some stuff here',
        'stderr': '',
    }
    assertSuccess(execute=True)

    # When execute=False, we just get the list back. But add it here for
    # completion. chmod is never called.
    assertSuccess()

    action_base._remote_chmod.return_value = {
        'rc': 1,
        'stdout': 'some stuff here',
        'stderr': 'and here',
    }
    assertThrowRegex('Failed to set execute bit on remote files',
                     execute=True)

    # Step 3: we are becoming unprivileged
    action_base._is_become_unprivileged.return_value = True

    # Step 3a: setfacl
    action_base._remote_set_user_facl = MagicMock()
    action_base._remote_set_user_facl.return_value = {
        'rc': 0,
        'stdout': '',
        'stderr': '',
    }
    assertSuccess()

    # Step 3b: chmod +x if we need to
    # To get here, setfacl failed, so mock it as such.
    action_base._remote_set_user_facl.return_value = {
        'rc': 1,
        'stdout': '',
        'stderr': '',
    }
    action_base._remote_chmod.return_value = {
        'rc': 1,
        'stdout': 'some stuff here',
        'stderr': '',
    }
    assertThrowRegex('Failed to set file mode on remote temporary file',
                     execute=True)
    action_base._remote_chmod.return_value = {
        'rc': 0,
        'stdout': 'some stuff here',
        'stderr': '',
    }
    assertSuccess(execute=True)

    # Step 3c: chown
    action_base._remote_chown = MagicMock()
    action_base._remote_chown.return_value = {
        'rc': 0,
        'stdout': '',
        'stderr': '',
    }
    assertSuccess()
    action_base._remote_chown.return_value = {
        'rc': 1,
        'stdout': '',
        'stderr': '',
    }
    # chown failed while connecting as an admin user -> hard error.
    remote_user = '******'
    action_base._get_admin_users = MagicMock()
    action_base._get_admin_users.return_value = ['root']
    assertThrowRegex('user would be unable to read the file.')
    remote_user = '******'

    # Step 3d: chmod +a on osx
    assertSuccess()
    action_base._remote_chmod.assert_called_with(
        ['remoteuser2 allow read'] + remote_paths, '+a')

    # This case can cause Solaris chmod to return 5 which the ssh plugin
    # treats as failure. To prevent a regression and ensure we still try the
    # rest of the cases below, we mock the thrown exception here.
    # This function ensures that only the macOS case (+a) throws this.
    def raise_if_plus_a(definitely_not_underscore, mode):
        if mode == '+a':
            raise AnsibleAuthenticationFailure()
        return {'rc': 0, 'stdout': '', 'stderr': ''}

    action_base._remote_chmod.side_effect = raise_if_plus_a
    assertSuccess()

    # Step 3e: chmod A+ on Solaris
    # We threw AnsibleAuthenticationFailure above, try Solaris fallback.
    # Based on our lambda above, it should be successful.
    action_base._remote_chmod.assert_called_with(
        remote_paths, 'A+user:remoteuser2:r:allow')
    assertSuccess()

    # Step 3f: Common group
    def rc_1_if_chmod_acl(definitely_not_underscore, mode):
        # Fail both ACL-style chmod modes so the chgrp path is reached.
        rc = 0
        if mode in CHMOD_ACL_FLAGS:
            rc = 1
        return {'rc': rc, 'stdout': '', 'stderr': ''}

    action_base._remote_chmod = MagicMock()
    action_base._remote_chmod.side_effect = rc_1_if_chmod_acl

    get_shell_option = action_base.get_shell_option
    action_base.get_shell_option = MagicMock()
    action_base.get_shell_option.side_effect = get_shell_option_for_arg(
        {
            'common_remote_group': 'commongroup',
        }, None)
    action_base._remote_chgrp = MagicMock()
    action_base._remote_chgrp.return_value = {
        'rc': 0,
        'stdout': '',
        'stderr': '',
    }
    # TODO: Add test to assert warning is shown if
    # ALLOW_WORLD_READABLE_TMPFILES is set in this case.
    assertSuccess()
    action_base._remote_chgrp.assert_called_once_with(
        remote_paths, 'commongroup')

    # Step 4: world-readable tmpdir
    action_base.get_shell_option.side_effect = get_shell_option_for_arg(
        {
            'world_readable_temp': True,
            'common_remote_group': None,
        }, None)
    action_base._remote_chmod.return_value = {
        'rc': 0,
        'stdout': 'some stuff here',
        'stderr': '',
    }
    assertSuccess()
    action_base._remote_chmod = MagicMock()
    action_base._remote_chmod.return_value = {
        'rc': 1,
        'stdout': 'some stuff here',
        'stderr': '',
    }
    assertThrowRegex('Failed to set file mode on remote files')

    # Otherwise if we make it here in this state, we hit the catch-all
    action_base.get_shell_option.side_effect = get_shell_option_for_arg(
        {}, None)
    assertThrowRegex('on the temporary files Ansible needs to create')
def test_network_cli__invalid_os(self):
    """Loading network_cli with an unknown network_os must fail."""
    play_ctx = PlayContext()
    play_ctx.network_os = 'does not exist'

    with self.assertRaises(AnsibleConnectionFailure):
        connection_loader.get('network_cli', play_ctx, '/dev/null')
def test_play_iterator(self):
    """Walk PlayIterator through a full play — pre_tasks, role tasks
    (including nested blocks and an include), regular tasks with
    block/rescue/always, and post_tasks — asserting task order and that a
    rescued failure does not mark the host failed."""
    # NOTE(review): the YAML indentation below was reconstructed from a
    # whitespace-mangled source; verify against the original fixture.
    fake_loader = DictDataLoader({
        "test_play.yml": """
        - hosts: all
          gather_facts: false
          roles:
          - test_role
          pre_tasks:
          - debug: msg="this is a pre_task"
          tasks:
          - debug: msg="this is a regular task"
          - block:
            - debug: msg="this is a block task"
            - block:
              - debug: msg="this is a sub-block in a block"
            rescue:
            - debug: msg="this is a rescue task"
            - block:
              - debug: msg="this is a sub-block in a rescue"
            always:
            - debug: msg="this is an always task"
            - block:
              - debug: msg="this is a sub-block in an always"
          post_tasks:
          - debug: msg="this is a post_task"
        """,
        '/etc/ansible/roles/test_role/tasks/main.yml': """
        - name: role task
          debug: msg="this is a role task"
        - block:
          - name: role block task
            debug: msg="inside block in role"
          always:
          - name: role always task
            debug: msg="always task in block in role"
        - include: foo.yml
        - name: role task after include
          debug: msg="after include in role"
        - block:
          - name: starting role nested block 1
            debug:
          - block:
            - name: role nested block 1 task 1
              debug:
            - name: role nested block 1 task 2
              debug:
            - name: role nested block 1 task 3
              debug:
          - name: end of role nested block 1
            debug:
          - name: starting role nested block 2
            debug:
          - block:
            - name: role nested block 2 task 1
              debug:
            - name: role nested block 2 task 2
              debug:
            - name: role nested block 2 task 3
              debug:
          - name: end of role nested block 2
            debug:
        """,
        '/etc/ansible/roles/test_role/tasks/foo.yml': """
        - name: role included task
          debug: msg="this is task in an include from a role"
        """
    })

    mock_var_manager = MagicMock()
    mock_var_manager._fact_cache = dict()
    mock_var_manager.get_vars.return_value = dict()

    p = Playbook.load('test_play.yml',
                      loader=fake_loader,
                      variable_manager=mock_var_manager)

    # Ten fake hosts named host00..host09.
    hosts = []
    for i in range(0, 10):
        host = MagicMock()
        host.name = host.get_name.return_value = 'host%02d' % i
        hosts.append(host)

    # Pretend host00 already has gathered facts.
    mock_var_manager._fact_cache['host00'] = dict()

    inventory = MagicMock()
    inventory.get_hosts.return_value = hosts
    inventory.filter_hosts.return_value = hosts

    play_context = PlayContext(play=p._entries[0])

    itr = PlayIterator(
        inventory=inventory,
        play=p._entries[0],
        play_context=play_context,
        variable_manager=mock_var_manager,
        all_vars=dict(),
    )

    # pre task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    # role task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.name, "role task")
    self.assertIsNotNone(task._role)
    # role block task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role block task")
    self.assertIsNotNone(task._role)
    # role block always task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role always task")
    self.assertIsNotNone(task._role)
    # role include task
    # (host_state, task) = itr.get_next_task_for_host(hosts[0])
    # self.assertIsNotNone(task)
    # self.assertEqual(task.action, 'debug')
    # self.assertEqual(task.name, "role included task")
    # self.assertIsNotNone(task._role)
    # role task after include
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role task after include")
    self.assertIsNotNone(task._role)
    # role nested block tasks
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "starting role nested block 1")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role nested block 1 task 1")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role nested block 1 task 2")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role nested block 1 task 3")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "end of role nested block 1")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "starting role nested block 2")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role nested block 2 task 1")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role nested block 2 task 2")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "role nested block 2 task 3")
    self.assertIsNotNone(task._role)
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.name, "end of role nested block 2")
    self.assertIsNotNone(task._role)
    # implicit meta: role_complete
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    self.assertIsNotNone(task._role)
    # regular play task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertIsNone(task._role)
    # block task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a block task"))
    # sub-block task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a sub-block in a block"))
    # mark the host failed
    itr.mark_host_failed(hosts[0])
    # block rescue task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a rescue task"))
    # sub-block rescue task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue"))
    # block always task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is an always task"))
    # sub-block always task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a sub-block in an always"))
    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    # post task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    # end of iteration
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNone(task)

    # host 0 shouldn't be in the failed hosts, as the error
    # was handled by a rescue block
    failed_hosts = itr.get_failed_hosts()
    self.assertNotIn(hosts[0], failed_hosts)
def run(self, tmp=None, task_vars=None):
    """Read a secret from Vault on localhost and copy it to the remote host as a file.

    Runs the ``hashivault_read`` module locally (forcing a local connection and
    disabling become), writes the returned value to a temporary file on the
    controller, then delegates to the ``copy`` action plugin to transfer it to
    the remote host with the originally requested become settings restored.

    :param tmp: temporary directory path passed through to module execution
    :param task_vars: task variables dict; defaults to an empty dict
    :returns: merged results dict; ``failed``/``rc``/``msg`` are set on error
    """
    if task_vars is None:
        task_vars = dict()
    results = super(ActionModule, self).run(tmp, task_vars)

    # Split task args into ones consumed here / forwarded to `copy`.
    args = self._task.args.copy()
    dest = args.pop('dest', None)
    force = args.pop('force', True)
    base64 = args.pop('base64', True)
    new_module_args = {
        'dest': dest,
        'force': force,
    }
    # Pass-through file attributes supported by the `copy` module; only
    # explicitly provided values are forwarded.
    copy_attributes = [
        'attributes',
        'backup',
        'checksum',
        'delimiter',
        'directory_mode',
        'follow',
        'group',
        'local_follow',
        'mode',
        'owner',
        'regexp',
        'selevel',
        'serole',
        'setype',
        'seuser',
        'unsafe_writes',
        'validate',
    ]
    for attribute in copy_attributes:
        value = args.pop(attribute, None)
        if value is not None:
            new_module_args[attribute] = value

    # Save become settings and the real connection, then force a local,
    # non-become connection so the Vault read happens on the controller.
    become = self._play_context.become
    become_method = self._play_context.become_method
    old_connection = self._connection
    self._connection = self._shared_loader_obj.connection_loader.get('local', PlayContext(), old_connection._new_stdin)
    self._play_context.become = False
    self._play_context.become_method = None

    results = merge_hash(
        results,
        # executes hashivault_read module on localhost
        self._execute_module(module_name='hashivault_read', tmp=tmp, task_vars=task_vars, module_args=args)
    )
    if 'failed' in results and results['failed'] is True:
        return results

    content = results.pop('value', None)
    if content is None:
        results['failed'] = True
        results['rc'] = 1
        results['msg'] = u'Could not find file `%s` in secret `%s`' % (args['key'], args['secret'])
        return results

    # write to temp file on ansible host to copy to remote host
    local_tmp = tempfile.NamedTemporaryFile(delete=False)
    if base64:
        # Secret value is expected to be base64-encoded; decode before writing.
        try:
            content = base64encode.b64decode(content)
        except Exception as ex:
            results['failed'] = True
            results['rc'] = 1
            secret_key = str(args.pop('secret', 'secret')) + "/" + str(args.pop('key', ''))
            results['msg'] = u'Error base64 decoding secret %s: %s' % (secret_key, str(ex))
            return results
    else:
        # Raw value: convert to bytes (utf-8 on Python 3) so it can be
        # written to the binary temp file.
        try:
            import sys
            if sys.version_info[0] > 2:
                content = bytes(content, 'utf-8')
            else:
                content = bytes(content)
        except Exception as ex:
            results['failed'] = True
            results['rc'] = 1
            secret_key = str(args.pop('secret', 'secret')) + "/" + str(args.pop('key', ''))
            results['msg'] = u'Error preparing utf-8 secret %s: %s' % (secret_key, str(ex))
            return results
    local_tmp.write(content)
    local_tmp.close()
    new_module_args['src'] = local_tmp.name
    self._update_module_args('copy', new_module_args, task_vars)

    # `copy` module uses an action plugin, so we have to execute
    # the plugin instead of directly executing the module
    copy_action = self._get_copy_action_plugin(old_connection)
    copy_action._task.args = new_module_args
    # Restore the caller's become settings for the remote copy.
    copy_action._play_context.become = become
    copy_action._play_context.become_method = become_method
    results = merge_hash(
        results,
        # executes copy action plugin/module on remote host
        copy_action.run(task_vars=task_vars)
    )
    # remove temp file
    os.unlink(local_tmp.name)
    # With force=False an unchanged copy means the file already existed;
    # surface that as a failure with an explanatory message.
    if force is False and results['changed'] is False:
        results['failed'] = True
        results['rc'] = 1
        results['msg'] = u'File %s already exists. Use `force: true` to overwrite' % dest
    return results
def plugin():
    """Return an ActionModule wired to a fake openshift_health_check task."""
    fake_task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
    return ActionModule(fake_task, None, PlayContext(), None, None, None)
def test_play_context(self):
    """Verify PlayContext layering: CLI options, then play, then task/inventory overrides."""
    # Options only: -vv => verbosity 2, --check => check_mode True; everything
    # else falls back to defaults/constants.
    (options, args) = self._parser.parse_args(['-vv', '--check'])
    play_context = PlayContext(options=options)
    self.assertEqual(play_context.connection, 'smart')
    self.assertEqual(play_context.remote_addr, None)
    self.assertEqual(play_context.remote_user, None)
    self.assertEqual(play_context.password, '')
    self.assertEqual(play_context.port, None)
    self.assertEqual(play_context.private_key_file, C.DEFAULT_PRIVATE_KEY_FILE)
    self.assertEqual(play_context.timeout, C.DEFAULT_TIMEOUT)
    self.assertEqual(play_context.shell, None)
    self.assertEqual(play_context.verbosity, 2)
    self.assertEqual(play_context.check_mode, True)
    self.assertEqual(play_context.no_log, None)

    # Play-level settings override the option-derived defaults.
    mock_play = MagicMock()
    mock_play.connection = 'mock'
    mock_play.remote_user = '******'
    mock_play.port = 1234
    mock_play.become = True
    mock_play.become_method = 'mock'
    mock_play.become_user = '******'
    mock_play.no_log = True

    play_context = PlayContext(play=mock_play, options=options)
    self.assertEqual(play_context.connection, 'mock')
    self.assertEqual(play_context.remote_user, 'mock')
    self.assertEqual(play_context.password, '')
    self.assertEqual(play_context.port, 1234)
    self.assertEqual(play_context.become, True)
    self.assertEqual(play_context.become_method, "mock")
    self.assertEqual(play_context.become_user, "mockroot")

    # Task-level settings and inventory variables (ansible_connection,
    # ansible_ssh_port) override the play-level values in turn.
    mock_task = MagicMock()
    mock_task.connection = 'mocktask'
    mock_task.remote_user = '******'
    mock_task.no_log = mock_play.no_log
    mock_task.become = True
    mock_task.become_method = 'mocktask'
    mock_task.become_user = '******'
    mock_task.become_pass = '******'
    mock_task._local_action = False
    mock_task.delegate_to = None

    all_vars = dict(
        ansible_connection='mock_inventory',
        ansible_ssh_port=4321,
    )

    mock_templar = MagicMock()

    play_context = PlayContext(play=mock_play, options=options)
    play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar)
    self.assertEqual(play_context.connection, 'mock_inventory')
    self.assertEqual(play_context.remote_user, 'mocktask')
    self.assertEqual(play_context.port, 4321)
    self.assertEqual(play_context.no_log, True)
    self.assertEqual(play_context.become, True)
    self.assertEqual(play_context.become_method, "mocktask")
    self.assertEqual(play_context.become_user, "mocktaskroot")
    self.assertEqual(play_context.become_pass, "mocktaskpass")

    # A task-level no_log=False must win over the play's no_log=True.
    mock_task.no_log = False
    play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar)
    self.assertEqual(play_context.no_log, False)
def test_play_iterator_add_tasks(self):
    """Exercise PlayIterator.add_tasks() and the underlying _insert_tasks_into_state()."""
    fake_loader = DictDataLoader({
        'test_play.yml': """
        - hosts: all
          gather_facts: no
          tasks:
          - debug: msg="dummy task"
        """,
    })

    mock_var_manager = MagicMock()
    mock_var_manager._fact_cache = dict()
    mock_var_manager.get_vars.return_value = dict()

    p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)

    # Ten mock hosts named host00..host09.
    hosts = []
    for i in range(0, 10):
        host = MagicMock()
        host.name = host.get_name.return_value = 'host%02d' % i
        hosts.append(host)

    inventory = MagicMock()
    inventory.get_hosts.return_value = hosts
    inventory.filter_hosts.return_value = hosts

    play_context = PlayContext(play=p._entries[0])

    itr = PlayIterator(
        inventory=inventory,
        play=p._entries[0],
        play_context=play_context,
        variable_manager=mock_var_manager,
        all_vars=dict(),
    )

    # test the high-level add_tasks() method
    s = HostState(blocks=[0, 1, 2])
    itr._insert_tasks_into_state = MagicMock(return_value=s)
    itr.add_tasks(hosts[0], [MagicMock(), MagicMock(), MagicMock()])
    self.assertEqual(itr._host_states[hosts[0].name], s)

    # now actually test the lower-level method that does the work
    itr = PlayIterator(
        inventory=inventory,
        play=p._entries[0],
        play_context=play_context,
        variable_manager=mock_var_manager,
        all_vars=dict(),
    )

    # iterate past first task
    _, task = itr.get_next_task_for_host(hosts[0])
    while (task and task.action != 'debug'):
        _, task = itr.get_next_task_for_host(hosts[0])

    if task is None:
        raise Exception(
            "iterated past end of play while looking for place to insert tasks"
        )

    # get the current host state and copy it so we can mutate it
    s = itr.get_host_state(hosts[0])
    s_copy = s.copy()

    # assert with an empty task list, or if we're in a failed state, we simply return the state as-is
    res_state = itr._insert_tasks_into_state(s_copy, task_list=[])
    self.assertEqual(res_state, s_copy)

    s_copy.fail_state = itr.FAILED_TASKS
    res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
    self.assertEqual(res_state, s_copy)

    # but if we've failed with a rescue/always block
    # (a failed state in ITERATING_RESCUE should accept the inserted task
    # into the current block's rescue list, and the peeked next task
    # should be exactly that inserted task)
    mock_task = MagicMock()
    s_copy.run_state = itr.ITERATING_RESCUE
    res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task])
    self.assertEqual(res_state, s_copy)
    self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue)
    itr._host_states[hosts[0].name] = res_state
    (next_state, next_task) = itr.get_next_task_for_host(hosts[0], peek=True)
    self.assertEqual(next_task, mock_task)
    itr._host_states[hosts[0].name] = s

    # test a regular insertion
    s_copy = s.copy()
    res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
def test_libssh_put_file_not_exist(self, mocked_super):
    """put_file() must raise AnsibleFileNotFound when the local path does not exist."""
    play_context = PlayContext()
    connection = connection_loader.get("ansible.netcommon.libssh", play_context, "/dev/null")
    with self.assertRaises(AnsibleFileNotFound):
        connection.put_file(in_path="", out_path="")
def test_play_iterator(self):
    """Walk a full play (role, blocks, rescue/always) through PlayIterator in strict order.

    A host failure mid-block must route iteration through the rescue and
    always sections, and the host must NOT end up in the failed-hosts list
    because the rescue handled the error.
    """
    #import epdb; epdb.st()
    fake_loader = DictDataLoader({
        "test_play.yml": """
        - hosts: all
          gather_facts: false
          roles:
          - test_role
          pre_tasks:
          - debug: msg="this is a pre_task"
          tasks:
          - debug: msg="this is a regular task"
          - block:
            - debug: msg="this is a block task"
            - block:
              - debug: msg="this is a sub-block in a block"
            rescue:
            - debug: msg="this is a rescue task"
            - block:
              - debug: msg="this is a sub-block in a rescue"
            always:
            - debug: msg="this is an always task"
            - block:
              - debug: msg="this is a sub-block in an always"
          post_tasks:
          - debug: msg="this is a post_task"
        """,
        '/etc/ansible/roles/test_role/tasks/main.yml': """
        - debug: msg="this is a role task"
        """,
    })

    mock_var_manager = MagicMock()
    mock_var_manager._fact_cache = dict()
    mock_var_manager.get_vars.return_value = dict()

    p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)

    # Ten mock hosts named host00..host09; host00 has a fact cache entry.
    hosts = []
    for i in range(0, 10):
        host = MagicMock()
        host.name = host.get_name.return_value = 'host%02d' % i
        hosts.append(host)

    mock_var_manager._fact_cache['host00'] = dict()

    inventory = MagicMock()
    inventory.get_hosts.return_value = hosts
    inventory.filter_hosts.return_value = hosts

    play_context = PlayContext(play=p._entries[0])

    itr = PlayIterator(
        inventory=inventory,
        play=p._entries[0],
        play_context=play_context,
        variable_manager=mock_var_manager,
        all_vars=dict(),
    )

    # lookup up an original task
    target_task = p._entries[0].tasks[0].block[0]
    task_copy = target_task.copy(exclude_parent=True)
    found_task = itr.get_original_task(hosts[0], task_copy)
    self.assertEqual(target_task, found_task)

    # a task not present in the play must not be found
    bad_task = Task()
    found_task = itr.get_original_task(hosts[0], bad_task)
    self.assertIsNone(found_task)

    # pre task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    # role task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertIsNotNone(task._role)
    # regular play task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertIsNone(task._role)
    # block task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a block task"))
    # sub-block task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a sub-block in a block"))
    # mark the host failed
    itr.mark_host_failed(hosts[0])
    # block rescue task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a rescue task"))
    # sub-block rescue task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue"))
    # block always task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is an always task"))
    # sub-block always task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    self.assertEqual(task.args, dict(msg="this is a sub-block in an always"))
    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    # post task
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'debug')
    # implicit meta: flush_handlers
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNotNone(task)
    self.assertEqual(task.action, 'meta')
    # end of iteration
    (host_state, task) = itr.get_next_task_for_host(hosts[0])
    self.assertIsNone(task)

    # host 0 shouldn't be in the failed hosts, as the error
    # was handled by a rescue block
    failed_hosts = itr.get_failed_hosts()
    self.assertNotIn(hosts[0], failed_hosts)
def test_action_base__configure_module(self): fake_loader = DictDataLoader({}) # create our fake task mock_task = MagicMock() mock_task.action = "copy" # create a mock connection, so we don't actually try and connect to things mock_connection = MagicMock() # create a mock shared loader object def mock_find_plugin(name, options): if name == 'badmodule': return None elif '.ps1' in options: return '/fake/path/to/%s.ps1' % name else: return '/fake/path/to/%s' % name mock_module_loader = MagicMock() mock_module_loader.find_plugin.side_effect = mock_find_plugin mock_shared_obj_loader = MagicMock() mock_shared_obj_loader.module_loader = mock_module_loader # we're using a real play context here play_context = PlayContext() # our test class action_base = DerivedActionBase( task=mock_task, connection=mock_connection, play_context=play_context, loader=fake_loader, templar=None, shared_loader_obj=mock_shared_obj_loader, ) # test python module formatting with patch.object( builtins, 'open', mock_open(read_data=to_bytes(python_module_replacers.strip(), encoding='utf-8'))) as m: with patch.object(os, 'rename') as m: mock_task.args = dict(a=1, foo='fö〩') mock_connection.module_implementation_preferences = ('', ) (style, shebang, data, path) = action_base._configure_module(mock_task.action, mock_task.args) self.assertEqual(style, "new") self.assertEqual(shebang, u"#!/usr/bin/python") # test module not found self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args) # test powershell module formatting with patch.object( builtins, 'open', mock_open(read_data=to_bytes( powershell_module_replacers.strip(), encoding='utf-8'))): mock_task.action = 'win_copy' mock_task.args = dict(b=2) mock_connection.module_implementation_preferences = ('.ps1', ) (style, shebang, data, path) = action_base._configure_module('stat', mock_task.args) self.assertEqual(style, "new") self.assertEqual(shebang, u'#!powershell') # test module not found 
self.assertRaises(AnsibleError, action_base._configure_module, 'badmodule', mock_task.args)
def run(self, play):
    '''
    Iterates over the roles/tasks in a play, using the given
    (or default) strategy for queueing tasks. The default is the
    linear strategy, which operates like classic Ansible by keeping
    all hosts in lock-step with a given task (meaning no hosts move
    on to the next task until all hosts are done with the current task).
    '''

    if not self._callbacks_loaded:
        self.load_callbacks()

    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
    templar = Templar(loader=self._loader, variables=all_vars)

    # Work on a post-validated copy so the original play object stays intact.
    new_play = play.copy()
    new_play.post_validate(templar)

    self.hostvars = HostVars(
        inventory=self._inventory,
        variable_manager=self._variable_manager,
        loader=self._loader,
    )

    # Fork # of forks, # of hosts or serial, whichever is lowest
    contenders = [
        self._options.forks,
        play.serial,
        len(self._inventory.get_hosts(new_play.hosts))
    ]
    contenders = [v for v in contenders if v is not None and v > 0]
    self._initialize_processes(min(contenders))

    play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
    # Give callback plugins that want it access to the play context.
    for callback_plugin in self._callback_plugins:
        if hasattr(callback_plugin, 'set_play_context'):
            callback_plugin.set_play_context(play_context)

    self.send_callback('v2_playbook_on_play_start', new_play)

    # initialize the shared dictionary containing the notified handlers
    self._initialize_notified_handlers(new_play.handlers)

    # load the specified strategy (or the default linear one)
    strategy = strategy_loader.get(new_play.strategy, self)
    if strategy is None:
        raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)

    # build the iterator
    iterator = PlayIterator(
        inventory=self._inventory,
        play=new_play,
        play_context=play_context,
        variable_manager=self._variable_manager,
        all_vars=all_vars,
        start_at_done=self._start_at_done,
    )

    # during initialization, the PlayContext will clear the start_at_task
    # field to signal that a matching task was found, so check that here
    # and remember it so we don't try to skip tasks on future plays
    if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
        self._start_at_done = True

    # and run the play using the strategy and cleanup on way out
    play_return = strategy.run(iterator, play_context)
    self._cleanup_processes()
    return play_return
def run(self, play):
    '''
    Iterates over the roles/tasks in a play, using the given
    (or default) strategy for queueing tasks. The default is the
    linear strategy, which operates like classic Ansible by keeping
    all hosts in lock-step with a given task (meaning no hosts move
    on to the next task until all hosts are done with the current task).
    '''

    if not self._callbacks_loaded:
        self.load_callbacks()

    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
    warn_if_reserved(all_vars)
    templar = Templar(loader=self._loader, variables=all_vars)

    # Work on a post-validated copy; fold role handlers into the play's own.
    new_play = play.copy()
    new_play.post_validate(templar)
    new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers

    self.hostvars = HostVars(
        inventory=self._inventory,
        variable_manager=self._variable_manager,
        loader=self._loader,
    )

    # Fork # of forks, # of hosts or serial, whichever is lowest
    num_hosts = len(self._inventory.get_hosts(new_play.hosts, ignore_restrictions=True))

    max_serial = 0
    if new_play.serial:
        # the play has not been post_validated here, so we may need
        # to convert the scalar value to a list at this point
        serial_items = new_play.serial
        if not isinstance(serial_items, list):
            serial_items = [serial_items]
        max_serial = max([pct_to_int(x, num_hosts) for x in serial_items])

    contenders = [self._options.forks, max_serial, num_hosts]
    contenders = [v for v in contenders if v is not None and v > 0]
    self._initialize_processes(min(contenders))

    play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
    # Give callback plugins that want it access to the play context.
    for callback_plugin in self._callback_plugins:
        if hasattr(callback_plugin, 'set_play_context'):
            callback_plugin.set_play_context(play_context)

    self.send_callback('v2_playbook_on_play_start', new_play)

    # initialize the shared dictionary containing the notified handlers
    self._initialize_notified_handlers(new_play)

    # load the specified strategy (or the default linear one)
    strategy = strategy_loader.get(new_play.strategy, self)
    if strategy is None:
        raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)

    # build the iterator
    iterator = PlayIterator(
        inventory=self._inventory,
        play=new_play,
        play_context=play_context,
        variable_manager=self._variable_manager,
        all_vars=all_vars,
        start_at_done=self._start_at_done,
    )

    # Because the TQM may survive multiple play runs, we start by marking
    # any hosts as failed in the iterator here which may have been marked
    # as failed in previous runs. Then we clear the internal list of failed
    # hosts so we know what failed this round.
    for host_name in self._failed_hosts.keys():
        host = self._inventory.get_host(host_name)
        iterator.mark_host_failed(host)

    self.clear_failed_hosts()

    # during initialization, the PlayContext will clear the start_at_task
    # field to signal that a matching task was found, so check that here
    # and remember it so we don't try to skip tasks on future plays
    if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
        self._start_at_done = True

    # and run the play using the strategy and cleanup on way out
    play_return = strategy.run(iterator, play_context)

    # now re-save the hosts that failed from the iterator to our internal list
    for host_name in iterator.get_failed_hosts():
        self._failed_hosts[host_name] = True

    strategy.cleanup()
    self._cleanup_processes()
    return play_return
def run(self):
    """Run the playbook CLI: optional galaxy role install, sanity checks,
    password prompting, then execute (or list hosts/tasks/tags for) the
    given playbooks via PlaybookExecutor.

    :returns: 0 after a listing run, otherwise PlaybookExecutor's result
    """

    super(PlaybookCLI, self).run()

    # Note: slightly wrong, this is written so that implicit localhost
    # Manage passwords
    sshpass = None
    becomepass = None
    passwords = {}

    # If a role file was given, install the roles via an embedded GalaxyCLI
    # invocation before running anything.
    if self.options.role_file:
        args = ["install"]
        if self.options.ignore_errors:
            args.append("--ignore-errors")
        # NOTE(review): `force` is assigned but never used — the flag is
        # appended to args directly below; this local looks like dead code.
        force = ""
        if self.options.force:
            args.append("--force")
        if self.options.roles_path:
            args.extend(["--roles-path", self.options.roles_path])
        if self.options.no_deps:
            args.append("--no-deps")
        args.extend(["--role-file", self.options.role_file])
        gc = GalaxyCLI(args=args)
        gc.parse()
        gc.run()

    # initial error check, to make sure all specified playbooks are accessible
    # before we start running anything through the playbook executor
    for playbook in self.args:
        if not os.path.exists(playbook):
            raise AnsibleError("the playbook: %s could not be found" % playbook)
        if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
            raise AnsibleError(
                "the playbook: %s does not appear to be a file" % playbook)

    # don't deal with privilege escalation or passwords when we don't need to
    if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

    loader, inventory, variable_manager = self._play_prereqs(self.options)

    # (which is not returned in list_hosts()) is taken into account for
    # warning if inventory is empty. But it can't be taken into account for
    # checking if limit doesn't match any hosts. Instead we don't worry about
    # limit if only implicit localhost was in inventory to start with.
    #
    # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
    no_hosts = False
    if len(inventory.list_hosts()) == 0:
        # Empty inventory
        display.warning(
            "provided hosts list is empty, only localhost is available")
        no_hosts = True
    inventory.subset(self.options.subset)
    if len(inventory.list_hosts()) == 0 and no_hosts is False:
        # Invalid limit
        raise AnsibleError("Specified --limit does not match any hosts")

    # flush fact cache if requested
    if self.options.flush_cache:
        self._flush_cache(inventory, variable_manager)

    # create the playbook executor, which manages running the plays via a task queue manager
    pbex = PlaybookExecutor(playbooks=self.args,
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            options=self.options,
                            passwords=passwords)

    results = pbex.run()

    # A list result means we were in a listing mode (hosts/tasks/tags);
    # render it instead of returning an exit code from the run.
    if isinstance(results, list):
        for p in results:
            display.display('\nplaybook: %s' % p['playbook'])
            for idx, play in enumerate(p['plays']):
                # Resolve relative paths against the playbook's directory
                # (or the included path for included plays).
                if play._included_path is not None:
                    loader.set_basedir(play._included_path)
                else:
                    pb_dir = os.path.realpath(
                        os.path.dirname(p['playbook']))
                    loader.set_basedir(pb_dir)
                msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(
                    play.hosts), play.name)
                mytags = set(play.tags)
                msg += '\tTAGS: [%s]' % (','.join(mytags))

                if self.options.listhosts:
                    playhosts = set(inventory.get_hosts(play.hosts))
                    msg += "\n pattern: %s\n hosts (%d):" % (
                        play.hosts, len(playhosts))
                    for host in playhosts:
                        msg += "\n %s" % host

                display.display(msg)

                all_tags = set()
                if self.options.listtags or self.options.listtasks:
                    taskmsg = ''
                    if self.options.listtasks:
                        taskmsg = ' tasks:\n'

                    # Recursively walk a block, accumulating a task listing
                    # and the union of all tags seen (meta tasks skipped).
                    def _process_block(b):
                        taskmsg = ''
                        for task in b.block:
                            if isinstance(task, Block):
                                taskmsg += _process_block(task)
                            else:
                                if task.action == 'meta':
                                    continue
                                all_tags.update(task.tags)
                                if self.options.listtasks:
                                    cur_tags = list(
                                        mytags.union(set(task.tags)))
                                    cur_tags.sort()
                                    if task.name:
                                        taskmsg += " %s" % task.get_name()
                                    else:
                                        taskmsg += " %s" % task.action
                                    taskmsg += "\tTAGS: [%s]\n" % ', '.join(
                                        cur_tags)
                        return taskmsg

                    all_vars = variable_manager.get_vars(play=play)
                    play_context = PlayContext(play=play, options=self.options)
                    for block in play.compile():
                        block = block.filter_tagged_tasks(
                            play_context, all_vars)
                        if not block.has_tasks():
                            continue
                        taskmsg += _process_block(block)

                    if self.options.listtags:
                        cur_tags = list(mytags.union(all_tags))
                        cur_tags.sort()
                        taskmsg += " TASK TAGS: [%s]\n" % ', '.join(
                            cur_tags)

                    display.display(taskmsg)

        return 0
    else:
        return results
def run(self):
    """Run the playbook CLI (variant with vault-password handling and an
    optional JSON task listing via ``--listtasksjson``).

    Builds loader/inventory/variable manager explicitly, runs the playbooks
    through PlaybookExecutor, and — in listing modes — renders either the
    classic text listing or a JSON document of playbooks/plays/tasks.

    :returns: 0 after a listing run, otherwise PlaybookExecutor's result
    """

    super(PlaybookCLI, self).run()

    # Note: slightly wrong, this is written so that implicit localhost
    # Manage passwords
    sshpass = None
    becomepass = None
    vault_pass = None
    passwords = {}

    # don't deal with privilege escalation or passwords when we don't need to
    if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        passwords = {
            'conn_pass': sshpass,
            'become_pass': becomepass
        }

    loader = DataLoader()

    if self.options.vault_password_file:
        # read vault_pass from a file
        vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=loader)
        loader.set_vault_password(vault_pass)
    elif self.options.ask_vault_pass:
        vault_pass = self.ask_vault_passwords()[0]
        loader.set_vault_password(vault_pass)

    # initial error check, to make sure all specified playbooks are accessible
    # before we start running anything through the playbook executor
    for playbook in self.args:
        if not os.path.exists(playbook):
            raise AnsibleError("the playbook: %s could not be found" % playbook)
        if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
            raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)

    # create the variable manager, which will be shared throughout
    # the code, ensuring a consistent view of global variables
    variable_manager = VariableManager()
    variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)
    variable_manager.options_vars = load_options_vars(self.options)

    # create the inventory, and filter it based on the subset specified (if any)
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
    variable_manager.set_inventory(inventory)

    # (which is not returned in list_hosts()) is taken into account for
    # warning if inventory is empty. But it can't be taken into account for
    # checking if limit doesn't match any hosts. Instead we don't worry about
    # limit if only implicit localhost was in inventory to start with.
    #
    # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
    no_hosts = False
    if len(inventory.list_hosts()) == 0:
        # Empty inventory
        # display.warning("provided hosts list is empty, only localhost is available")
        no_hosts = True
    inventory.subset(self.options.subset)
    if len(inventory.list_hosts()) == 0 and no_hosts is False:
        # Invalid limit
        raise AnsibleError("Specified --limit does not match any hosts")

    # flush fact cache if requested
    if self.options.flush_cache:
        for host in inventory.list_hosts():
            variable_manager.clear_facts(host)

    # create the playbook executor, which manages running the plays via a task queue manager
    pbex = PlaybookExecutor(playbooks=self.args,
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            options=self.options,
                            passwords=passwords)

    results = pbex.run()

    # Accumulator for the --listtasksjson output; populated alongside the
    # text listing so both modes walk the results identically.
    result_playbooks_object = []
    result_json_object = {'playbooks': result_playbooks_object}
    if isinstance(results, list):
        for p in results:
            result_plays = []
            result_playbook_object = {
                'name': p['playbook'],
                'plays': result_plays
            }
            result_playbooks_object.append(result_playbook_object)
            if not self.options.listtasksjson:
                display.display('\nplaybook: %s' % p['playbook'])

            for idx, play in enumerate(p['plays']):
                msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
                mytags = set(play.tags)
                msg += '\tTAGS: [%s]' % (','.join(mytags))

                result_tasks = []
                result_play_object = {
                    'name': play.name,
                    'pattern': play.hosts,
                    'tags': [x.encode('UTF8') for x in mytags],
                    'tasks': result_tasks
                }
                result_plays.append(result_play_object)

                if self.options.listhosts:
                    playhosts = set(inventory.get_hosts(play.hosts))
                    msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
                    for host in playhosts:
                        msg += "\n %s" % host
                    result_play_object['hosts'] = [str(x) for x in playhosts]

                if not self.options.listtasksjson:
                    display.display(msg)

                all_tags = set()
                if self.options.listtags or self.options.listtasks:
                    taskmsg = ''
                    if self.options.listtasks:
                        taskmsg = ' tasks:\n'

                    # Recursively walk a block, accumulating the text listing,
                    # the JSON task objects, and the union of all tags seen
                    # (meta tasks skipped).
                    def _process_block(b):
                        taskmsg = ''
                        for task in b.block:
                            if isinstance(task, Block):
                                taskmsg += _process_block(task)
                            else:
                                if task.action == 'meta':
                                    continue
                                all_tags.update(task.tags)
                                if self.options.listtasks:
                                    cur_tags = list(mytags.union(set(task.tags)))
                                    cur_tags.sort()
                                    result_task_object = {'tags': [x.encode('UTF8') for x in cur_tags]}
                                    result_tasks.append(result_task_object)
                                    if task.name:
                                        taskmsg += " %s" % task.get_name()
                                        result_task_object['name'] = task.get_name().encode('UTF8')
                                    else:
                                        taskmsg += " %s" % task.action
                                    taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
                        return taskmsg

                    all_vars = variable_manager.get_vars(loader=loader, play=play)
                    play_context = PlayContext(play=play, options=self.options)
                    for block in play.compile():
                        block = block.filter_tagged_tasks(play_context, all_vars)
                        if not block.has_tasks():
                            continue
                        taskmsg += _process_block(block)

                    if self.options.listtags:
                        cur_tags = list(mytags.union(all_tags))
                        cur_tags.sort()
                        taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)

                    if not self.options.listtasksjson:
                        display.display(taskmsg)

        if self.options.listtasksjson:
            print(json.dumps(result_json_object, indent=4, sort_keys=True))

        return 0
    else:
        return results
def plugin_fixture():
    """Build a network_cli connection configured for the ios network_os."""
    play_context = PlayContext()
    play_context.network_os = "ios"
    return connection_loader.get("ansible.netcommon.network_cli", play_context, "/dev/null")