def ask_passwords():
    '''Prompt for connection (SSH) and become passwords if needed.

    Reads the parsed CLI arguments from ``context.CLIARGS`` to decide which
    prompts to show.  Returns a ``(sshpass, becomepass)`` tuple; each element
    is ``None`` when not asked for, otherwise the entered password wrapped in
    ``AnsibleUnsafeText``.

    Fix: the original body contained a corrupted fused string literal
    (``prompt="SSH password: "******"..."``) where the ``getpass`` call and
    the ``become_prompt`` assignment had been merged into one invalid
    expression; they are restored as two separate statements here.
    '''
    op = context.CLIARGS
    sshpass = None
    becomepass = None
    become_prompt = ''
    # honor the "agnostic" prompt setting: show a generic BECOME label
    # instead of the concrete become method name
    become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op['become_method'].upper()
    try:
        if op['ask_pass']:
            sshpass = getpass.getpass(prompt="SSH password: ")
            become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
        else:
            become_prompt = "%s password: " % become_prompt_method
        if op['become_ask_pass']:
            becomepass = getpass.getpass(prompt=become_prompt)
            # empty become password defaults to the SSH password when both
            # were requested
            if op['ask_pass'] and becomepass == '':
                becomepass = sshpass
    except EOFError:
        # stdin closed mid-prompt: proceed with whatever we have (None)
        pass

    # we 'wrap' the passwords to prevent templating as
    # they can contain special chars and trigger it incorrectly
    if sshpass:
        sshpass = AnsibleUnsafeText(to_text(sshpass))
    if becomepass:
        becomepass = AnsibleUnsafeText(to_text(becomepass))

    return (sshpass, becomepass)
def ask_passwords(self):
    '''Prompt for connection (SSH) and become passwords if needed.

    Reads option flags from ``self.options`` and returns a
    ``(sshpass, becomepass)`` tuple; each element is ``None`` when not asked
    for, otherwise the entered password wrapped in ``AnsibleUnsafeText``.

    Fix: the original body contained a corrupted fused string literal
    (``prompt="SSH password: "******"..."``) where the ``getpass`` call and
    the ``become_prompt`` assignment had been merged into one invalid
    expression; they are restored as two separate statements here.
    '''
    op = self.options
    sshpass = None
    becomepass = None
    become_prompt = ''
    # honor the "agnostic" prompt setting: show a generic BECOME label
    # instead of the concrete become method name
    become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op.become_method.upper()
    try:
        if op.ask_pass:
            sshpass = getpass.getpass(prompt="SSH password: ")
            become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
            # wrap immediately to prevent templating of special chars
            if sshpass:
                sshpass = AnsibleUnsafeText(
                    to_bytes(sshpass, errors='strict', nonstring='simplerepr'))
        else:
            become_prompt = "%s password: " % become_prompt_method
        if op.become_ask_pass:
            becomepass = getpass.getpass(prompt=become_prompt)
            # empty become password defaults to the SSH password when both
            # were requested
            if op.ask_pass and becomepass == '':
                becomepass = sshpass
            if becomepass:
                becomepass = AnsibleUnsafeText(to_bytes(becomepass))
    except EOFError:
        # stdin closed mid-prompt: proceed with whatever we have
        pass

    return (sshpass, becomepass)
def test_get_validated_value_string_rewrap_unsafe(self):
    """A string field fed an AnsibleUnsafeText value must stay unsafe after validation."""
    string_attr = FieldAttribute(isa='string')
    unsafe_value = AnsibleUnsafeText(u'bar')
    empty_templar = Templar(None)
    subject = self.ClassUnderTest()

    validated = subject.get_validated_value('foo', string_attr, unsafe_value, empty_templar)

    # the unsafe wrapper must survive validation, both in type and in value
    self.assertIsInstance(validated, AnsibleUnsafeText)
    self.assertEqual(validated, AnsibleUnsafeText(u'bar'))
def test_encode_decode_unsafe():
    """Unsafe-wrapped values round-trip through the Ansible JSON encoder/decoder."""
    marker = u'{#NOTACOMMENT#}'
    data = {
        'key_value': AnsibleUnsafeText(marker),
        'list': [AnsibleUnsafeText(marker)],
        'list_dict': [{'key_value': AnsibleUnsafeText(marker)}],
    }
    json_expected = (
        '{"key_value": {"__ansible_unsafe": "{#NOTACOMMENT#}"}, '
        '"list": [{"__ansible_unsafe": "{#NOTACOMMENT#}"}], '
        '"list_dict": [{"key_value": {"__ansible_unsafe": "{#NOTACOMMENT#}"}}]}'
    )

    # encoder marks unsafe values with the __ansible_unsafe wrapper object
    encoded = json.dumps(data, cls=AnsibleJSONEncoder, preprocess_unsafe=True, sort_keys=True)
    assert encoded == json_expected
    # decoder restores the original unsafe-wrapped structure
    assert json.loads(json_expected, cls=AnsibleJSONDecoder) == data
def test_unicode(self):
    """Non-ASCII unsafe text must survive a YAML dump/load round trip."""
    original = u'nöel'
    wrapped = AnsibleUnsafeText(original)

    dumped = self._dump_string(wrapped, dumper=self.dumper)
    loader = self._loader(self._build_stream(dumped))
    reloaded = loader.get_single_data()

    self.assertEqual(original, reloaded)
def run_interpreter_discovery_if_necessary(s, task_vars, action, rediscover_python):
    """ Triggers ansible python interpreter discovery if requested.
    Caches this value the same way Ansible does it.
    For connections like `docker`, we want to rediscover the python interpreter because
    it could be different than what's ran on the host

    :param s: configured interpreter setting; one of the 'auto*' modes triggers
        discovery, anything else is returned unchanged
    :param task_vars: task variable dict; discovery results are cached under
        task_vars['ansible_facts']['discovered_interpreter_python']
    :param action: action plugin instance; private flags on it are read/written
        here to coordinate discovery (see inline comments)
    :param rediscover_python: when truthy, an existing cached discovery result
        is discarded so discovery runs again
    :returns: the interpreter to use (input `s`, or the discovered/cached value)
    """
    # keep trying different interpreters until we don't error
    if action._finding_python_interpreter:
        return action._possible_python_interpreter
    if s in ['auto', 'auto_legacy', 'auto_silent', 'auto_legacy_silent']:
        # python is the only supported interpreter_name as of Ansible 2.8.8
        interpreter_name = 'python'
        discovered_interpreter_config = u'discovered_interpreter_%s' % interpreter_name

        if task_vars.get('ansible_facts') is None:
            task_vars['ansible_facts'] = {}

        if rediscover_python and task_vars.get('ansible_facts', {}).get(discovered_interpreter_config):
            # if we're rediscovering python then chances are we're running something like a docker connection
            # this will handle scenarios like running a playbook that does stuff + then dynamically creates a docker container,
            # then runs the rest of the playbook inside that container, and then rerunning the playbook again
            action._rediscovered_python = True

            # blow away the discovered_interpreter_config cache and rediscover
            del task_vars['ansible_facts'][discovered_interpreter_config]

        if discovered_interpreter_config not in task_vars['ansible_facts']:
            action._finding_python_interpreter = True
            # fake pipelining so discover_interpreter can be happy
            action._connection.has_pipelining = True
            s = AnsibleUnsafeText(discover_interpreter(
                action=action,
                interpreter_name=interpreter_name,
                discovery_mode=s,
                task_vars=task_vars))

            # cache discovered interpreter
            task_vars['ansible_facts'][discovered_interpreter_config] = s
            action._connection.has_pipelining = False
        else:
            # reuse the previously discovered interpreter from the fact cache
            s = task_vars['ansible_facts'][discovered_interpreter_config]

        # propagate discovered interpreter as fact
        action._discovered_interpreter_key = discovered_interpreter_config
        action._discovered_interpreter = s

    action._finding_python_interpreter = False
    return s
def test_task_executor_run_clean_res(self):
    """run() must sanitize loop results: unsafe stays unsafe, bytes become text."""
    executor = TaskExecutor(None, MagicMock(), None, None, None, None, None, None)
    executor._get_loop_items = MagicMock(return_value=[1])
    executor._run_loop = MagicMock(return_value=[{
        'unsafe_bytes': AnsibleUnsafeBytes(b'{{ $bar }}'),
        'unsafe_text': AnsibleUnsafeText(u'{{ $bar }}'),
        'bytes': b'bytes',
        'text': u'text',
        'int': 1,
    }])

    cleaned = executor.run()['results'][0]

    # unsafe values are normalized to unsafe text; plain bytes decode to text
    expected_types = {
        'unsafe_bytes': AnsibleUnsafeText,
        'unsafe_text': AnsibleUnsafeText,
        'bytes': text_type,
        'text': text_type,
        'int': int,
    }
    for key, expected in expected_types.items():
        self.assertIsInstance(cleaned[key], expected)
def test_to_bytes_unsafe():
    """to_bytes on unsafe text must yield unsafe bytes with the same content."""
    converted = to_bytes(AnsibleUnsafeText(u'foo'))
    assert isinstance(converted, AnsibleUnsafeBytes)
    assert converted == AnsibleUnsafeBytes(b'foo')
def test_wrap_var_unsafe_text():
    """wrap_var must keep already-unsafe text as AnsibleUnsafeText."""
    wrapped = wrap_var(AnsibleUnsafeText(u'foo'))
    assert isinstance(wrapped, AnsibleUnsafeText)
def test_AnsibleUnsafeText():
    """AnsibleUnsafeText instances must be recognized as AnsibleUnsafe."""
    text = AnsibleUnsafeText(u'foo')
    assert isinstance(text, AnsibleUnsafe)