def launch(self, monitor=False, wait=False, timeout=None, **kwargs):
    """Launch a new ad-hoc command.

    Runs a user-defined command from Ansible Tower, immediately starts it,
    and returns back an ID in order for its status to be monitored.

    =====API DOCS=====
    Launch a new ad-hoc command.

    :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched command
                    rather than exiting with a success.
    :type monitor: bool
    :param wait: Flag that if set, monitor the status of the job, but do not print while job is
                 in progress.
    :type wait: bool
    :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the
                    given number of seconds.
    :type timeout: int
    :param `**kwargs`: Fields needed to create and launch an ad hoc command.
    :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; result of subsequent
              ``wait`` call if ``wait`` flag is on; dictionary of "id" and "changed" if neither of
              the two flags is on.
    :rtype: dict
    :raises tower_cli.exceptions.TowerCLIError: When ad hoc commands are not available in the
                                                Tower backend.

    =====API DOCS=====
    """
    # This feature only exists for versions 2.2 and up.
    r = client.get('/')
    if 'ad_hoc_commands' not in r.json():
        raise exc.TowerCLIError('Your host is running an outdated version '
                                'of Ansible Tower that can not run '
                                'ad-hoc commands (2.2 or earlier).')

    # Pop the None arguments because we have no .write() method in the
    # inheritance chain for this type of resource. This is needed so that
    # unspecified optional fields are not sent to the API.
    self._pop_none(kwargs)

    # Actually start the command.
    debug.log('Launching the ad-hoc command.', header='details')
    result = client.post(self.endpoint, data=kwargs)
    command = result.json()
    command_id = command['id']

    # If we were told to monitor the command once it started, then call
    # monitor from here.
    if monitor:
        return self.monitor(command_id, timeout=timeout)
    elif wait:
        return self.wait(command_id, timeout=timeout)

    # Return the command ID and other response data.
    answer = OrderedDict((
        ('changed', True),
        ('id', command_id),
    ))
    answer.update(result.json())
    return answer
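# Illustrative usage sketch (not part of the resource class above): launching
# an ad-hoc command through tower-cli's Python API. This assumes the documented
# tower_cli.get_resource() entry point; the field names and IDs below are
# placeholder values for a hypothetical Tower inventory and credential.
import tower_cli

ad_hoc = tower_cli.get_resource('ad_hoc')
result = ad_hoc.launch(
    inventory=1,            # placeholder inventory ID
    credential=1,           # placeholder machine credential ID
    module_name='ping',
    monitor=True,           # poll until the command finishes
    timeout=300,            # give up monitoring after five minutes
)
print(result['id'], result['changed'])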
def test_mixture(self):
    """Test to ensure that both dict and OrderedDict can be parsed by
    ordered_dump.
    """
    ordered_dict = OrderedDict()
    ordered_dict['a'] = {}
    ordered_dict['b'] = OrderedDict()
    for item in ordered_dict.values():
        for i in reversed('abcdefg'):
            item[i] = ord(i) - ord('a')
    try:
        parser.ordered_dump(ordered_dict, Dumper=yaml.SafeDumper,
                            default_flow_style=False)
    except Exception:
        self.fail("No exceptions should be raised here.")
def test_get(self):
    """Individual settings can be retrieved."""
    all_settings = OrderedDict({'FIRST': 123})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        r = self.res.get('FIRST')
        self.assertEqual(r, {'id': 'FIRST', 'value': 123})
def test_update_invalid_setting_name(self):
    """A setting must exist to be updated."""
    all_settings = OrderedDict({'FIRST': 123})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        t.register_json('/settings/all/', all_settings, method='PATCH')
        self.assertRaises(exc.NotFound, self.res.modify, 'MISSING', 456)
def launch(self, monitor=False, wait=False, timeout=None, become=False,
           **kwargs):
    """Launch a new ad-hoc command.

    Runs a user-defined command from Ansible Tower, immediately starts it,
    and returns back an ID in order for its status to be monitored.
    """
    # This feature only exists for versions 2.2 and up.
    r = client.get('/')
    if 'ad_hoc_commands' not in r.json():
        raise exc.TowerCLIError('Your host is running an outdated version '
                                'of Ansible Tower that can not run '
                                'ad-hoc commands (2.2 or earlier).')

    # Pop the None arguments because we have no .write() method in the
    # inheritance chain for this type of resource. This is needed so that
    # unspecified optional fields are not sent to the API.
    self._pop_none(kwargs)

    # Change the flag to the dictionary format.
    if become:
        kwargs['become_enabled'] = True

    # Actually start the command.
    debug.log('Launching the ad-hoc command.', header='details')
    result = client.post(self.endpoint, data=kwargs)
    command = result.json()
    command_id = command['id']

    # If we were told to monitor the command once it started, then call
    # monitor from here.
    if monitor:
        return self.monitor(command_id, timeout=timeout)
    elif wait:
        return self.wait(command_id, timeout=timeout)

    # Return the command ID and other response data.
    answer = OrderedDict((
        ('changed', True),
        ('id', command_id),
    ))
    answer.update(result.json())
    return answer
def test_dunder_repr(self):
    """Establish that the OrderedDict __repr__ method works in the way
    we expect.
    """
    d = OrderedDict()
    d['foo'] = 'spam'
    d['bar'] = 'eggs'
    self.assertEqual(repr(d), "{'foo': 'spam', 'bar': 'eggs'}")
def modify(self, setting, value):
    """Modify an already existing object.

    Positional argument SETTING is the setting name and VALUE is its value,
    which can be provided directly or obtained from a file name if prefixed
    with '@'.

    =====API DOCS=====
    Modify an already existing Tower setting.

    :param setting: The name of the Tower setting to be modified.
    :type setting: str
    :param value: The new value of the Tower setting.
    :type value: str
    :returns: A dictionary combining the JSON output of the modified resource, as well as two extra
              fields: "changed", a flag indicating whether the resource was successfully updated;
              "id", the name of the modified setting, which serves as its primary key.
    :rtype: dict

    =====API DOCS=====
    """
    prev_value = new_value = self.get(setting)['value']
    answer = OrderedDict()
    encrypted = '$encrypted$' in six.text_type(prev_value)
    if encrypted or six.text_type(prev_value) != six.text_type(value):
        if setting == 'LICENSE':
            r = client.post('/config/',
                            data=self.coerce_type(setting, value))
            new_value = r.json()
        else:
            r = client.patch(
                self.endpoint,
                data={setting: self.coerce_type(setting, value)}
            )
            new_value = r.json()[setting]
        answer.update(r.json())
    changed = encrypted or (prev_value != new_value)
    answer.update({
        'changed': changed,
        'id': setting,
        'value': new_value,
    })
    return answer
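# Illustrative usage sketch (not part of the resource class above): updating a
# Tower setting through tower-cli's Python API and inspecting the combined
# answer dictionary returned by modify(). Assumes the 'setting' resource name
# used by the tower-cli CLI; the setting name and value are placeholders.
import tower_cli

settings_res = tower_cli.get_resource('setting')
result = settings_res.modify('AUTH_TOKEN_EXPIRATION', 3600)
if result['changed']:
    print('Updated %s to %r' % (result['id'], result['value']))
else:
    print('%s already had that value; nothing was sent.' % result['id'])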
def test_output_order(self):
    """Test that ordered_dump preserves the order of OrderedDict."""
    ordered_dict = OrderedDict()
    for i in reversed('abcdefg'):
        ordered_dict[i] = ord(i) - ord('a')
    self.assertEqual(
        parser.ordered_dump(ordered_dict, Dumper=yaml.SafeDumper,
                            default_flow_style=False),
        self.CORRECT_OUTPUT)
def test_list_all_by_category(self):
    """Settings can be listed by category."""
    system_settings = OrderedDict({'FEATURE_ENABLED': True})
    auth_settings = OrderedDict({'SOME_API_KEY': 'ABC123'})
    with client.test_mode as t:
        t.register_json('/settings/system/', system_settings)
        t.register_json('/settings/authentication/', auth_settings)
        r = self.res.list(category='system')
        self.assertEqual(
            r['results'],
            [{'id': 'FEATURE_ENABLED', 'value': True}]
        )
        r = self.res.list(category='authentication')
        self.assertEqual(
            r['results'],
            [{'id': 'SOME_API_KEY', 'value': 'ABC123'}]
        )
def test_update_with_list(self):
    """A setting's value can be updated with a list."""
    options = {'actions': {'PUT': {'FIRST': {'type': 'list'}}}}
    all_settings = OrderedDict({'FIRST': []})
    patched = OrderedDict({'FIRST': ['abc']})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        t.register_json('/settings/all/', options, method='OPTIONS')
        t.register_json('/settings/all/', patched, method='PATCH')
        r = self.res.modify('FIRST', "['abc']")
        self.assertTrue(r['changed'])
        request = t.requests[0]
        self.assertEqual(request.method, 'GET')
        request = t.requests[1]
        self.assertEqual(request.method, 'OPTIONS')
        request = t.requests[2]
        self.assertEqual(request.method, 'PATCH')
        self.assertEqual(request.body, json.dumps({'FIRST': ['abc']}))
def test_encrypted_updates_always_patch(self):
    """Always PATCH a setting if it's an encrypted one."""
    options = {'actions': {'PUT': {'SECRET': {'type': 'string'}}}}
    all_settings = OrderedDict({'SECRET': '$encrypted$'})
    patched = OrderedDict({'SECRET': '$encrypted$'})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        t.register_json('/settings/all/', options, method='OPTIONS')
        t.register_json('/settings/all/', patched, method='PATCH')
        r = self.res.modify('SECRET', 'SENSITIVE')
        self.assertTrue(r['changed'])
        self.assertEqual(len(t.requests), 3)
        request = t.requests[0]
        self.assertEqual(request.method, 'GET')
        request = t.requests[1]
        self.assertEqual(request.method, 'OPTIONS')
        request = t.requests[2]
        self.assertEqual(request.method, 'PATCH')
        self.assertEqual(request.body, json.dumps({'SECRET': 'SENSITIVE'}))
def test_idempotent_updates_ignored(self):
    """Don't PATCH a setting if the provided value didn't change."""
    all_settings = OrderedDict({'FIRST': 123})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        r = self.res.modify('FIRST', '123')
        self.assertFalse(r['changed'])
        self.assertEqual(len(t.requests), 1)
        request = t.requests[0]
        self.assertEqual(request.method, 'GET')
def test_update_with_unicode(self):
    """A setting's value can be updated with unicode."""
    new_val = six.u('Iñtërnâtiônàlizætiøn')
    options = {'actions': {'PUT': {'FIRST': {'type': 'string'}}}}
    all_settings = OrderedDict({'FIRST': 'FOO'})
    patched = OrderedDict({'FIRST': new_val})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        t.register_json('/settings/all/', options, method='OPTIONS')
        t.register_json('/settings/all/', patched, method='PATCH')
        r = self.res.modify('FIRST', new_val)
        self.assertTrue(r['changed'])
        request = t.requests[0]
        self.assertEqual(request.method, 'GET')
        request = t.requests[1]
        self.assertEqual(request.method, 'OPTIONS')
        request = t.requests[2]
        self.assertEqual(request.method, 'PATCH')
        self.assertEqual(request.body, json.dumps({'FIRST': new_val}))
def modify(self, setting, value):
    """Modify an already existing object.

    Positional argument SETTING is the setting name and VALUE is its value,
    which can be provided directly or obtained from a file name if prefixed
    with '@'.
    """
    prev_value = new_value = self.get(setting)['value']
    answer = OrderedDict()
    encrypted = '$encrypted$' in six.text_type(prev_value)
    if encrypted or six.text_type(prev_value) != six.text_type(value):
        if setting == 'LICENSE':
            r = client.post('/config/',
                            data=self.coerce_type(setting, value))
            new_value = r.json()
        else:
            r = client.patch(
                self.endpoint,
                data={setting: self.coerce_type(setting, value)})
            new_value = r.json()[setting]
        answer.update(r.json())
    changed = encrypted or (prev_value != new_value)
    answer.update({
        'changed': changed,
        'id': setting,
        'value': new_value,
    })
    return answer
def modify(self, setting, value):
    """Modify an already existing object."""
    prev_value = new_value = self.get(setting)['value']
    answer = OrderedDict()
    encrypted = '$encrypted$' in six.text_type(prev_value)
    if encrypted or six.text_type(prev_value) != six.text_type(value):
        if setting == 'LICENSE':
            r = client.post('/config/',
                            data=self.coerce_type(setting, value))
            new_value = r.json()
        else:
            r = client.patch(
                self.endpoint,
                data={setting: self.coerce_type(setting, value)})
            new_value = r.json()[setting]
        answer.update(r.json())
    changed = encrypted or (prev_value != new_value)
    answer.update({
        'changed': changed,
        'id': setting,
        'value': new_value,
    })
    return answer
def launch(self, monitor=False, timeout=None, become=False, **kwargs):
    """Launch a new ad-hoc command.

    Runs a user-defined command from Ansible Tower, immediately starts it,
    and returns back an ID in order for its status to be monitored.
    """
    # This feature only exists for versions 2.2 and up.
    r = client.get('/')
    if 'ad_hoc_commands' not in r.json():
        raise exc.TowerCLIError('Your host is running an outdated version '
                                'of Ansible Tower that can not run '
                                'ad-hoc commands (2.2 or earlier).')

    # Pop the None arguments because we have no .write() method in the
    # inheritance chain for this type of resource. This is needed so that
    # unspecified optional fields are not sent to the API.
    self._pop_none(kwargs)

    # Change the flag to the dictionary format.
    if become:
        kwargs['become_enabled'] = True

    # Actually start the command.
    debug.log('Launching the ad-hoc command.', header='details')
    result = client.post(self.endpoint, data=kwargs)
    command = result.json()
    command_id = command['id']

    # If we were told to monitor the command once it started, then call
    # monitor from here.
    if monitor:
        return self.monitor(command_id, timeout=timeout)

    # Return the command ID and other response data.
    answer = OrderedDict((
        ('changed', True),
        ('id', command_id),
    ))
    answer.update(result.json())
    return answer
def test_license_update(self):
    """The software license can be updated."""
    all_settings = OrderedDict({'LICENSE': {}})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        t.register_json('/config/', all_settings, method='POST')
        self.res.modify('LICENSE', LICENSE_DATA)
        request = t.requests[0]
        self.assertEqual(request.method, 'GET')
        request = t.requests[1]
        self.assertEqual(request.method, 'POST')
        self.assertEqual(json.loads(request.body), json.loads(LICENSE_DATA))
def test_list_all(self):
    """All settings can be listed."""
    all_settings = OrderedDict({'FIRST': 123, 'SECOND': 'foo'})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        r = self.res.list()
        self.assertEqual(
            sorted(r['results'], key=lambda k: k['id']),
            [{'id': 'FIRST', 'value': 123},
             {'id': 'SECOND', 'value': 'foo'}])
def test_remove_encrypted_value(self):
    test_hash = {
        'first': 'ok',
        'second': common.ENCRYPTED_VALUE,
        'sub': OrderedDict({
            'first': common.ENCRYPTED_VALUE,
            'second': 'ok',
        }),
    }
    result_hash = {
        'first': 'ok',
        'second': '',
        'sub': {
            'first': '',
            'second': 'ok',
        },
    }
    common.remove_encrypted_values(test_hash)
    self.assertEqual(test_hash, result_hash,
                     "Failed to remove encrypted values from hash")
def test_get_invalid(self):
    """Invalid setting names throw an error."""
    all_settings = OrderedDict({'FIRST': 123})
    with client.test_mode as t:
        t.register_json('/settings/all/', all_settings)
        self.assertRaises(exc.NotFound, self.res.get, 'MISSING')
def monitor(self, pk, min_interval=1, max_interval=30, timeout=None,
            outfile=sys.stdout, **kwargs):
    """Monitor a running job.

    Blocks further input until the job completes (whether successfully
    or unsuccessfully) and a final status can be given.
    """
    dots = itertools.cycle([0, 1, 2, 3])
    longest_string = 0
    interval = min_interval
    start = time.time()

    # Poll the Ansible Tower instance for status, and print the status
    # to the outfile (usually standard out).
    #
    # Note that this is one of the few places where we use `secho`
    # even though we're in a function that might theoretically be imported
    # and run in Python. This seems fine; outfile can be set to /dev/null,
    # and the normal use of this method is CLI monitoring anyway.
    result = self.status(pk, detail=True)
    last_poll = time.time()
    timeout_check = 0
    while result['status'] != 'successful':
        # If the job has failed, we want to raise an Exception for that
        # so we get a non-zero response.
        if result['failed']:
            if is_tty(outfile) and not settings.verbose:
                secho('\r' + ' ' * longest_string + '\n', file=outfile)
            raise exc.JobFailure('Job failed.')

        # Sanity check: Have we officially timed out?
        # The timeout check is incremented below, so this is checking
        # to see if we were timed out as of the previous iteration.
        # If we are timed out, abort.
        if timeout and timeout_check - start > timeout:
            raise exc.Timeout('Monitoring aborted due to timeout.')

        # If the outfile is a TTY, print the current status.
        output = '\rCurrent status: %s%s' % (result['status'],
                                             '.' * next(dots))
        if longest_string > len(output):
            output += ' ' * (longest_string - len(output))
        else:
            longest_string = len(output)
        if is_tty(outfile) and not settings.verbose:
            secho(output, nl=False, file=outfile)

        # Put the process to sleep briefly.
        time.sleep(0.2)

        # Sanity check: Have we reached our timeout?
        # If we're about to time out, then we need to ensure that we
        # do one last check.
        #
        # Note that the actual timeout will be performed at the start
        # of the **next** iteration, so there's a chance for the job's
        # completion to be noted first.
        timeout_check = time.time()
        if timeout and timeout_check - start > timeout:
            last_poll -= interval

        # If enough time has elapsed, ask the server for a new status.
        #
        # Note that this doesn't actually do a status check every single
        # time; we want the "spinner" to spin even if we're not actively
        # doing a check.
        #
        # So, what happens is that we are "counting down" (actually up)
        # to the next time that we intend to do a check, and once that
        # time hits, we do the status check as part of the normal cycle.
        if time.time() - last_poll > interval:
            result = self.status(pk, detail=True)
            last_poll = time.time()
            interval = min(interval * 1.5, max_interval)

            # If the outfile is *not* a TTY, print a status update
            # when and only when we make an actual check to job status.
            if not is_tty(outfile) or settings.verbose:
                click.echo('Current status: %s' % result['status'],
                           file=outfile)

    # Wipe out the previous output.
    if is_tty(outfile) and not settings.verbose:
        secho('\r' + ' ' * longest_string, file=outfile, nl=False)
        secho('\r', file=outfile, nl=False)

    # Return the job ID and other response data.
    answer = OrderedDict((
        ('changed', True),
        ('id', pk),
    ))
    answer.update(result)

    # Make sure to return the ID of the resource and not the update number;
    # relevant for project creation and update.
    answer['id'] = pk
    return answer
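# Standalone sketch of the polling pattern used by monitor() above: the spinner
# is redrawn every 0.2 seconds, but the server is only queried once the current
# poll interval has elapsed, and that interval backs off by a factor of 1.5 up
# to max_interval. All names here are local to the sketch.
import time


def poll_with_backoff(check_status, min_interval=1, max_interval=30,
                      timeout=None):
    interval = min_interval
    start = last_poll = time.time()
    status = check_status()
    while status not in ('successful', 'failed'):
        time.sleep(0.2)  # keep the spinner responsive between real checks
        if timeout and time.time() - start > timeout:
            raise RuntimeError('Monitoring aborted due to timeout.')
        if time.time() - last_poll > interval:
            status = check_status()  # only hit the server on the interval
            last_poll = time.time()
            interval = min(interval * 1.5, max_interval)
    return status


# Example with a fake status source that "completes" after a few checks.
_statuses = iter(['pending', 'running', 'running', 'successful'])
print(poll_with_backoff(lambda: next(_statuses), min_interval=0.01))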
def write(self, pk=None, create_on_missing=False, fail_on_found=False,
          force_on_exists=True, **kwargs):
    """Modify the given object using the Ansible Tower API.

    Return the object and a boolean value informing us whether or not
    the record was changed.

    If `create_on_missing` is True and an object matching the appropriate
    unique criteria is not found, then a new object is created. If there
    are no unique criteria on the model (other than the primary key), then
    this will always constitute a creation (even if a match exists) unless
    the primary key is sent.

    If `fail_on_found` is True, then if an object matching the unique
    criteria already exists, the operation fails.

    If `force_on_exists` is True, then if an object is modified based on
    matching via unique fields (as opposed to the primary key), other
    fields are updated based on the data sent. If `force_on_exists` is set
    to False, then the non-unique values are only written in a creation
    case.
    """
    existing_data = {}

    # Remove default values (anything where the value is None).
    self._pop_none(kwargs)

    # Determine which record we are writing, if we weren't given a
    # primary key.
    if not pk:
        debug.log('Checking for an existing record.', header='details')
        existing_data = self._lookup(
            fail_on_found=fail_on_found,
            fail_on_missing=not create_on_missing,
            include_debug_header=False, **kwargs
        )
        if existing_data:
            pk = existing_data['id']
    else:
        # We already know the primary key, but get the existing data.
        # This allows us to know whether the write made any changes.
        debug.log('Getting existing record.', header='details')
        existing_data = self.get(pk)

    # Sanity check: Are we missing required values?
    # If we don't have a primary key, then all required values must be
    # set, and if they're not, it's an error.
    required_fields = [i.key or i.name for i in self.fields if i.required]
    missing_fields = [i for i in required_fields if i not in kwargs]
    if missing_fields and not pk:
        raise exc.BadRequest('Missing required fields: %s' %
                             ', '.join(missing_fields).replace('_', '-'))

    # Sanity check: Do we need to do a write at all?
    # If `force_on_exists` is False and the record was, in fact, found,
    # then no action is required.
    if pk and not force_on_exists:
        debug.log('Record already exists, and --force-on-exists is off; '
                  'do nothing.', header='decision', nl=2)
        answer = OrderedDict((
            ('changed', False),
            ('id', pk),
        ))
        answer.update(existing_data)
        return answer

    # Similarly, if all existing data matches our write parameters,
    # there's no need to do anything.
    if all([kwargs[k] == existing_data.get(k, None)
            for k in kwargs.keys()]):
        debug.log('All provided fields match existing data; do nothing.',
                  header='decision', nl=2)
        answer = OrderedDict((
            ('changed', False),
            ('id', pk),
        ))
        answer.update(existing_data)
        return answer

    # Get the URL and method to use for the write.
    url = self.endpoint
    method = 'POST'
    if pk:
        url += '%d/' % pk
        method = 'PATCH'

    # If debugging is on, print the URL and data being sent.
    debug.log('Writing the record.', header='details')

    # Actually perform the write.
    r = getattr(client, method.lower())(url, data=kwargs)

    # At this point, we know the write succeeded, and we know that data
    # was changed in the process.
    answer = OrderedDict((
        ('changed', True),
        ('id', r.json()['id']),
    ))
    answer.update(r.json())
    return answer
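# Standalone sketch of the idempotency check performed by write() above: when
# every field the caller supplied already matches the existing record, no
# request is issued and the answer reports changed=False. Names here are local
# to the sketch.
def needs_write(existing_data, **kwargs):
    return not all(kwargs[k] == existing_data.get(k, None) for k in kwargs)


existing = {'id': 42, 'name': 'demo', 'description': 'example'}
print(needs_write(existing, name='demo'))                     # False: no-op
print(needs_write(existing, name='demo', description='new'))  # True: PATCH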
def write(self, pk=None, create_on_missing=False, fail_on_found=False,
          force_on_exists=True, **kwargs):
    """Modify the given object using the Ansible Tower API.

    Return the object and a boolean value informing us whether or not
    the record was changed.

    If `create_on_missing` is True and an object matching the appropriate
    unique criteria is not found, then a new object is created. If there
    are no unique criteria on the model (other than the primary key), then
    this will always constitute a creation (even if a match exists) unless
    the primary key is sent.

    If `fail_on_found` is True, then if an object matching the unique
    criteria already exists, the operation fails.

    If `force_on_exists` is True, then if an object is modified based on
    matching via unique fields (as opposed to the primary key), other
    fields are updated based on the data sent. If `force_on_exists` is set
    to False, then the non-unique values are only written in a creation
    case.
    """
    existing_data = {}

    # Remove default values (anything where the value is None).
    # click is unfortunately bad at the way it sends through unspecified
    # defaults.
    for key, value in copy(kwargs).items():
        if value is None:
            kwargs.pop(key)
        if hasattr(value, 'read'):
            kwargs[key] = value.read()

    # Determine which record we are writing, if we weren't given a
    # primary key.
    if not pk:
        debug.log('Checking for an existing record.', header='details')
        existing_data = self._lookup(fail_on_found=fail_on_found,
                                     fail_on_missing=not create_on_missing,
                                     include_debug_header=False, **kwargs)
        if existing_data:
            pk = existing_data['id']
    else:
        # We already know the primary key, but get the existing data.
        # This allows us to know whether the write made any changes.
        debug.log('Getting existing record.', header='details')
        existing_data = self.get(pk)

    # Sanity check: Are we missing required values?
    # If we don't have a primary key, then all required values must be
    # set, and if they're not, it's an error.
    required_fields = [i.key or i.name for i in self.fields if i.required]
    missing_fields = [i for i in required_fields if i not in kwargs]
    if missing_fields and not pk:
        raise exc.BadRequest('Missing required fields: %s' %
                             ', '.join(missing_fields))

    # Sanity check: Do we need to do a write at all?
    # If `force_on_exists` is False and the record was, in fact, found,
    # then no action is required.
    if pk and not force_on_exists:
        debug.log('Record already exists, and --force-on-exists is off; '
                  'do nothing.', header='decision', nl=2)
        answer = OrderedDict((
            ('changed', False),
            ('id', pk),
        ))
        answer.update(existing_data)
        return answer

    # Similarly, if all existing data matches our write parameters,
    # there's no need to do anything.
    if all([kwargs[k] == existing_data.get(k, None)
            for k in kwargs.keys()]):
        debug.log('All provided fields match existing data; do nothing.',
                  header='decision', nl=2)
        answer = OrderedDict((
            ('changed', False),
            ('id', pk),
        ))
        answer.update(existing_data)
        return answer

    # Get the URL and method to use for the write.
    url = self.endpoint
    method = 'POST'
    if pk:
        url += '%d/' % pk
        method = 'PATCH'

    # If debugging is on, print the URL and data being sent.
    debug.log('Writing the record.', header='details')

    # Actually perform the write.
    r = getattr(client, method.lower())(url, data=kwargs)

    # At this point, we know the write succeeded, and we know that data
    # was changed in the process.
    answer = OrderedDict((
        ('changed', True),
        ('id', r.json()['id']),
    ))
    answer.update(r.json())
    return answer
def monitor(self, pk, parent_pk=None, timeout=None, interval=0.5,
            outfile=sys.stdout, **kwargs):
    """Stream the standard output from a job, project update, or
    inventory update.
    """
    # If we do not have the unified job info, infer it from the parent.
    if pk is None:
        pk = self.last_job_data(parent_pk, **kwargs)['id']
    job_endpoint = '%s%s/' % (self.unified_job_type, pk)

    # Pause until the job is in a running state.
    self.wait(pk, exit_on=['running', 'successful'])

    # Loop initialization.
    start = time.time()
    start_line = 0
    result = client.get(job_endpoint).json()

    click.echo('\033[0;91m------Starting Standard Out Stream------\033[0m',
               nl=2, file=outfile)

    # Poll the Ansible Tower instance for status and content,
    # and print standard out to the out file.
    while not result['failed'] and result['status'] != 'successful':
        result = client.get(job_endpoint).json()

        # Put the process to sleep briefly.
        time.sleep(interval)

        # Make a request to get standard out.
        content = self.lookup_stdout(pk, start_line, full=False)

        # In the first moments of running the job, the standard out
        # may not be available yet.
        if not content.startswith(b"Waiting for results"):
            line_count = len(content.splitlines())
            start_line += line_count
            click.echo(content, nl=0)

        if timeout and time.time() - start > timeout:
            raise exc.Timeout('Monitoring aborted due to timeout.')

    # Special final line for closure with workflow jobs.
    if self.endpoint == '/workflow_jobs/':
        click.echo(self.lookup_stdout(pk, start_line, full=True), nl=1)

    click.echo('\033[0;91m------End of Standard Out Stream--------\033[0m',
               nl=2, file=outfile)

    if result['failed']:
        raise exc.JobFailure('Job failed.')

    # Return the job ID and other response data.
    answer = OrderedDict((
        ('changed', True),
        ('id', pk),
    ))
    answer.update(result)

    # Make sure to return the ID of the resource and not the update number;
    # relevant for project creation and update.
    if parent_pk:
        answer['id'] = parent_pk
    else:
        answer['id'] = pk
    return answer
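# Illustrative usage sketch (not part of the resource class above): streaming a
# job's standard out with monitor() until it completes. Assumes the 'job'
# resource exposes this monitor() implementation; the job ID is a placeholder.
import tower_cli

jobs = tower_cli.get_resource('job')
result = jobs.monitor(pk=123, timeout=600)  # echoes stdout while polling
print('Job %s finished; changed=%s' % (result['id'], result['changed']))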