def create(self, fail_on_found=False, force_on_exists=False, **kwargs):
    """Create a group and, if necessary, modify the inventory source within
    the group.

    Keyword arguments that are not fields of the group itself are split off
    and written to the group's automatically-created inventory source after
    the group is created.

    :param fail_on_found: If set, fail when a matching group already exists.
    :param force_on_exists: If set, update an existing match instead of
        treating it as a no-op.
    :param `**kwargs`: Group fields plus any inventory-source fields.
    :returns: The group's create/write answer dict, with ``changed`` also
        reflecting inventory-source modifications.
    :raises exc.UsageError: If neither a parent group nor an inventory is
        provided.
    """
    # Fields belonging to the group record itself; everything else in
    # kwargs is assumed to target the inventory source.
    group_fields = [f.name for f in self.fields]
    if kwargs.get('parent', None):
        # Scope the endpoint under the parent group and inherit its
        # inventory.
        parent_data = self.set_child_endpoint(parent=kwargs['parent'],
                                              inventory=kwargs.get('inventory', None))
        kwargs['inventory'] = parent_data['inventory']
        group_fields.append('group')
    elif 'inventory' not in kwargs:
        raise exc.UsageError('To create a group, you must provide a '
                             'parent inventory or parent group.')

    # Break out the options for the group vs its inventory_source.
    # Iterate over a copy because we pop from kwargs inside the loop.
    is_kwargs = {}
    for field in kwargs.copy():
        if field not in group_fields:
            if field == 'job_timeout':
                # The CLI option is named job_timeout; the inventory
                # source field is just "timeout".
                is_kwargs['timeout'] = kwargs.pop(field)
            else:
                is_kwargs[field] = kwargs.pop(field)

    # Handle alias for "manual" source: a manual source is represented by
    # omitting the source field entirely.
    if is_kwargs.get('source', None) == 'manual':
        is_kwargs.pop('source')

    # First, create the group.
    answer = super(Resource, self).create(fail_on_found=fail_on_found,
                                          force_on_exists=force_on_exists, **kwargs)

    # If the group already exists and we aren't supposed to make changes,
    # then we're done.
    if not force_on_exists and not answer['changed']:
        return answer

    # Sanity check: A group was created, but do we need to do anything
    # with the inventory source at all? If no credential or source
    # was specified, then we'd just be updating the inventory source
    # with an effective no-op.
    if len(is_kwargs) == 0:
        return answer

    # Get the inventory source ID ("isid").
    # Inventory sources are not created directly; rather, one was created
    # automatically when the group was created.
    isid = self._get_inventory_source_id(answer)

    # We now have our inventory source ID; modify it according to the
    # provided parameters.
    isrc = get_resource('inventory_source')
    is_answer = isrc.write(pk=isid, force_on_exists=True, **is_kwargs)

    # If either the inventory_source or the group objects were modified
    # then reflect this in the output to avoid confusing the user.
    if is_answer['changed']:
        answer['changed'] = True
    return answer
def convert(self, value, param, ctx):
    """Convert CLI input to a dictionary of structured data.

    Delegates basic conversion to the superclass, then parses the
    resulting string as JSON/YAML (key=value pairs are not accepted).
    Raises a usage error if the string cannot be parsed.
    """
    raw = super(StructuredInput, self).convert(value, param, ctx)
    try:
        parsed = string_to_dict(raw, allow_kv=False)
    except Exception:
        raise exc.UsageError(
            'Error loading structured input given by %s parameter. Please '
            'check the validity of your JSON/YAML format.' % param.name)
    return parsed
def obj_res(data, fail_on=['type', 'obj', 'res']):
    """
    Given some CLI input data,
    Returns the following and their types:
    obj - the role grantee
    res - the resource that the role applies to
    """
    errors = []
    if 'type' in fail_on and not data.get('type', None):
        errors.append('You must provide a role type to use this command.')

    # Find the grantee (user or team) among the actor fields; more than
    # one actor at a time is an error.
    obj, obj_type = None, None
    for field in ACTOR_FIELDS:
        if not data.get(field, False):
            continue
        if obj is None:
            obj, obj_type = data[field], field
        else:
            errors.append('You can not give a role to a user '
                          'and team at the same time.')
            break
    if obj is None and 'obj' in fail_on:
        errors.append('You must specify either user or '
                      'team to use this command.')

    # Pick out the single valid target resource; more than one at a time
    # is an error. The "target_team" field is reported as type "team".
    res, res_type = None, None
    for field in RESOURCE_FIELDS:
        if not data.get(field, False):
            continue
        if res is None:
            res, res_type = data[field], field
            if res_type == 'target_team':
                res_type = 'team'
        else:
            errors.append('You can only give a role to one '
                          'type of resource at a time.')
            break
    if res is None and 'res' in fail_on:
        errors.append('You must specify a target resource '
                      'to use this command.')

    if errors:
        raise exc.UsageError("\n".join(errors))
    return obj, obj_type, res, res_type
def helper(kwargs, obj):
    """The helper function preceding actual function that aggregates
    unified jt fields.

    Collapses the mutually-exclusive unified-job-template option fields in
    ``kwargs`` (the keys of ``UNIFIED_JT``) into a single
    ``unified_job_template`` entry and returns the schedules endpoint for
    the selected template.
    """
    unified_job_template = None
    # At most one of the UNIFIED_JT fields may be supplied.
    for item in UNIFIED_JT:
        if kwargs.get(item, None) is not None:
            jt_id = kwargs.pop(item)
            if unified_job_template is None:
                unified_job_template = (item, jt_id)
            else:
                raise exc.UsageError(
                    'More than one unified job template fields provided, '
                    'please tighten your criteria.'
                )
    if unified_job_template is not None:
        kwargs['unified_job_template'] = unified_job_template[1]
        # Include unified_job_template in the resource's identity so that
        # lookups key on it as well.
        obj.identity = tuple(list(obj.identity) + ['unified_job_template'])
        # UNIFIED_JT presumably maps each option field to its endpoint
        # prefix — TODO confirm against its definition elsewhere.
        return '/'.join([UNIFIED_JT[unified_job_template[0]],
                         str(unified_job_template[1]), 'schedules/'])
    elif is_create:
        # NOTE(review): `is_create` is a free variable here — it appears to
        # come from an enclosing (decorator) scope not visible in this
        # chunk; confirm it is bound wherever this helper is defined.
        # When no template field was given and is_create is falsy, the
        # function implicitly returns None.
        raise exc.UsageError('You must provide exactly one unified job'
                             ' template field during creation.')
def list(self, root=False, **kwargs):
    """Return a list of groups.

    =====API DOCS=====
    Retrieve a list of groups.

    :param root: Flag that if set, only root groups of a specific inventory will be listed.
    :type root: bool
    :param parent: Primary key or name of the group whose child groups will be listed.
    :type parent: str
    :param all_pages: Flag that if set, collect all pages of content from the API when returning results.
    :type all_pages: bool
    :param page: The page to show. Ignored if all_pages is set.
    :type page: int
    :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.
    :type query: list
    :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
    :returns: A JSON object containing details of all resource objects returned by Tower backend.
    :rtype: dict
    :raises tower_cli.exceptions.UsageError: When ``root`` flag is on and ``inventory`` is not present in
                                             ``**kwargs``.

    =====API DOCS=====
    """
    # When a parent group is named, scope the endpoint to its children.
    if kwargs.get('parent', None):
        self.set_child_endpoint(parent=kwargs.pop('parent'),
                                inventory=kwargs.get('inventory', None))

    # Guard: `--root` is meaningless without an inventory to root in.
    if root and not kwargs.get('inventory', None):
        raise exc.UsageError(
            'The --root option requires specifying an inventory also.')

    # Root-group listing uses a dedicated endpoint; otherwise defer to
    # the standard list implementation.
    if root:
        response = client.get('/inventories/%d/root_groups/' % kwargs['inventory'])
        return response.json()
    return super(Resource, self).list(**kwargs)
def create(self, fail_on_found=False, force_on_exists=False, **kwargs):
    """Create a group.

    =====API DOCS=====
    Create a group.

    :param parent: Primary key or name of the group which will be the parent of created group.
    :type parent: str
    :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria
                          already exists.
    :type fail_on_found: bool
    :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will
                            be updated to the provided values.; If unset, a match causes the request to be
                            a no-op.
    :type force_on_exists: bool
    :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the
                       resource object.
    :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:
              "changed", a flag indicating if the resource is created successfully; "id", an integer which
              is the primary key of the created object.
    :rtype: dict
    :raises tower_cli.exceptions.UsageError: When inventory is not provided in ``**kwargs`` and ``parent``
                                             is not provided.

    =====API DOCS=====
    """
    parent = kwargs.get('parent', None)
    if parent:
        # A parent group implies the inventory; adopt it and scope the
        # endpoint under the parent.
        parent_data = self.set_child_endpoint(parent=parent,
                                              inventory=kwargs.get('inventory', None))
        kwargs['inventory'] = parent_data['inventory']
    elif 'inventory' not in kwargs:
        raise exc.UsageError(
            'To create a group, you must provide a parent inventory or parent group.'
        )
    return super(Resource, self).create(fail_on_found=fail_on_found,
                                        force_on_exists=force_on_exists,
                                        **kwargs)
def list(self, root=False, **kwargs):
    """Return a list of groups, optionally restricted to one parent
    group's children or to an inventory's root groups.
    """
    # A parent group narrows the endpoint to that group's children.
    parent = kwargs.get('parent', None)
    if parent:
        self.set_child_endpoint(parent=parent,
                                inventory=kwargs.get('inventory', None))
        kwargs.pop('parent')

    # Sanity check: If we got `--root` and no inventory, that's an error.
    if root and not kwargs.get('inventory', None):
        raise exc.UsageError('The --root option requires specifying an '
                             'inventory also.')

    # Root groups come from a dedicated per-inventory endpoint.
    if root:
        return client.get(
            '/inventories/%d/root_groups/' % kwargs['inventory']).json()

    # Otherwise, defer to the superclass implementation.
    return super(Resource, self).list(**kwargs)
def config(key=None, value=None, scope='user', global_=False, unset=False):
    """Read or write tower-cli configuration.

    `tower config` saves the given setting to the appropriate Tower CLI;
    either the user's ~/.tower_cli.cfg file, or the /etc/tower/tower_cli.cfg
    file if --global is used.

    Writing to /etc/tower/tower_cli.cfg is likely to require heightened
    permissions (in other words, sudo).
    """
    # If the old-style `global_` option is set, issue a deprecation notice.
    if global_:
        scope = 'global'
        warnings.warn(
            'The `--global` option is deprecated and will be '
            'removed. Use `--scope=global` to get the same effect.',
            DeprecationWarning)

    # If no key was provided, print out the current configuration
    # in play.
    if not key:
        seen = set()
        parser_desc = {
            'runtime': 'Runtime options.',
            'environment': 'Options from environment variables.',
            'local': 'Local options (set with `tower-cli config '
                     '--scope=local`; stored in .tower_cli.cfg of this '
                     'directory or a parent)',
            # NOTE(review): this description string looks redacted/garbled
            # ('******'); confirm against the original source before
            # relying on it.
            'user': '******'
                    '~/.tower_cli.cfg).',
            'global': 'Global options (set with `tower-cli config '
                      '--scope=global`, stored in /etc/tower/tower_cli.cfg).',
            'defaults': 'Defaults.',
        }

        # Iterate over each parser (English: location we can get settings from)
        # and print any settings that we haven't already seen.
        #
        # We iterate over settings from highest precedence to lowest, so any
        # seen settings are overridden by the version we iterated over already.
        click.echo('')
        for name, parser in zip(settings._parser_names, settings._parsers):
            # Determine if we're going to see any options in this
            # parser that get echoed (i.e. at least one option not already
            # printed at higher precedence).
            will_echo = False
            for option in parser.options('general'):
                if option in seen:
                    continue
                will_echo = True

            # Print a segment header
            if will_echo:
                secho('# %s' % parser_desc[name], fg='green', bold=True)

            # Iterate over each option in the parser and, if we haven't
            # already seen an option at higher precedence, print it.
            for option in parser.options('general'):
                if option in seen:
                    continue
                _echo_setting(option)
                seen.add(option)

            # Print a nice newline, for formatting.
            if will_echo:
                click.echo('')
        return

    # Sanity check: Is this a valid configuration option? If it's not
    # a key we recognize, abort.
    if not hasattr(settings, key):
        raise exc.TowerCLIError('Invalid configuration option "%s".' % key)

    # Sanity check: The combination of a value and --unset makes no
    # sense.
    if value and unset:
        raise exc.UsageError('Cannot provide both a value and --unset.')

    # If a key was provided but no value was provided, then just
    # print the current value for that key.
    if key and not value and not unset:
        _echo_setting(key)
        return

    # Okay, so we're *writing* a key. Let's do this.
    # First, we need the appropriate file.
    filename = os.path.expanduser('~/.tower_cli.cfg')
    if scope == 'global':
        if not os.path.isdir('/etc/tower/'):
            raise exc.TowerCLIError('/etc/tower/ does not exist, and this '
                                    'command cowardly declines to create it.')
        filename = '/etc/tower/tower_cli.cfg'
    elif scope == 'local':
        filename = '.tower_cli.cfg'

    # Read in the appropriate config file, write this value, and save
    # the result back to the file.
    parser = Parser()
    parser.add_section('general')
    parser.read(filename)
    if unset:
        parser.remove_option('general', key)
    else:
        parser.set('general', key, value)
    with open(filename, 'w') as config_file:
        parser.write(config_file)

    # Give rw permissions to user only fix for issue number 48
    try:
        os.chmod(filename, stat.S_IRUSR | stat.S_IWUSR)
    except Exception as e:
        warnings.warn(
            'Unable to set permissions on {0} - {1} '.format(filename, e),
            UserWarning)
    click.echo('Configuration updated successfully.')
def launch(self, job_template=None, monitor=False, wait=False, timeout=None, no_input=True, extra_vars=None,
           **kwargs):
    """Launch a new job based on a job template.

    Creates a new job in Ansible Tower, immediately starts it, and
    returns back an ID in order for its status to be monitored.

    =====API DOCS=====
    Launch a new job based on a job template.

    :param job_template: Primary key or name of the job template to launch new job.
    :type job_template: str
    :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched job rather
                    than exiting with a success.
    :type monitor: bool
    :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.
    :type wait: bool
    :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number
                    of seconds.
    :type timeout: int
    :param no_input: Flag that if set, suppress any requests for input.
    :type no_input: bool
    :param extra_vars: yaml formatted texts that contains extra variables to pass on.
    :type extra_vars: array of strings
    :param diff_mode: Specify diff mode for job template to run.
    :type diff_mode: bool
    :param limit: Specify host limit for job template to run.
    :type limit: str
    :param tags: Specify tagged actions in the playbook to run.
    :type tags: str
    :param skip_tags: Specify tagged actions in the playbook to omit.
    :type skip_tags: str
    :param job_type: Specify job type for job template to run.
    :type job_type: str
    :param verbosity: Specify verbosity of the playbook run.
    :type verbosity: int
    :param inventory: Specify machine credential for job template to run.
    :type inventory: str
    :param credential: Specify machine credential for job template to run.
    :type credential: str
    :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``
              call if ``wait`` flag is on; Result of subsequent ``status`` call if none of the two flags are on.
    :rtype: dict
    =====API DOCS=====
    """
    # Get the job template from Ansible Tower.
    # This is used as the baseline for starting the job.
    jt_resource = get_resource('job_template')
    jt = jt_resource.get(job_template)

    # Update the job data for special treatment of certain fields
    # Special case for job tags, historically just called --tags
    tags = kwargs.get('tags', None)
    data = {}
    if tags:
        data['job_tags'] = tags

    # Special case for cross-version compatibility with credentials:
    # normalize the argument to a list of credential ids.
    cred_arg = kwargs.pop('credential', ())
    if isinstance(cred_arg, (list, tuple)):
        credentials = cred_arg
    else:
        credentials = [cred_arg]
    if credentials:
        if 'credentials' in jt['related']:
            # Has Tower 3.3 / multi-cred support
            # combine user-provided credentials with JT credentials
            jt_creds = set(
                c['id'] for c in jt['summary_fields']['credentials'])
            kwargs['credentials'] = list(set(credentials) | jt_creds)
        else:
            if len(credentials) > 1:
                raise exc.UsageError(
                    'Providing multiple credentials on launch can only be '
                    'done with Tower version 3.3 and higher or recent AWX.'
                )
            kwargs['credential'] = credentials[0]

    # Initialize an extra_vars list that starts with the job template
    # preferences first, if they exist
    extra_vars_list = []
    # NOTE(review): at this point `data` can only contain 'job_tags', so
    # this 'extra_vars' check appears to never fire here — confirm against
    # the full original source before changing.
    if 'extra_vars' in data and len(data['extra_vars']) > 0:
        # But only do this for versions before 2.3
        debug.log('Getting version of Tower.', header='details')
        r = client.get('/config/')
        if LooseVersion(r.json()['version']) < LooseVersion('2.4'):
            extra_vars_list = [data['extra_vars']]

    # Add the runtime extra_vars to this list
    if extra_vars:
        extra_vars_list += list(extra_vars)  # accept tuples

    # If the job template requires prompting for extra variables,
    # do so (unless --no-input is set).
    if jt.get('ask_variables_on_launch', False) and not no_input \
            and not extra_vars:
        # If JT extra_vars are JSON, echo them to user as YAML
        initial = parser.process_extra_vars([jt['extra_vars']],
                                            force_json=False)
        initial = '\n'.join((
            '# Specify extra variables (if any) here as YAML.',
            '# Lines beginning with "#" denote comments.',
            initial,
        ))
        # Open the user's editor; treat an aborted edit as empty input.
        extra_vars = click.edit(initial) or ''
        if extra_vars != initial:
            extra_vars_list = [extra_vars]

    # Data is starting out with JT variables, and we only want to
    # include extra_vars that come from the algorithm here.
    data.pop('extra_vars', None)

    # Replace/populate data fields if prompted.
    modified = set()
    for resource in PROMPT_LIST:
        if jt.pop('ask_' + resource + '_on_launch', False) and not no_input:
            resource_object = kwargs.get(resource, None)
            if type(resource_object) == types.Related:
                # Resolve a related-resource reference to its primary key.
                resource_class = get_resource(resource)
                resource_object = resource_class.get(resource).pop(
                    'id', None)
            if resource_object is None:
                debug.log('{0} is asked at launch but not provided'.format(
                    resource), header='warning')
            elif resource != 'tags':
                data[resource] = resource_object
                modified.add(resource)

    # Dump extra_vars into JSON string for launching job
    if len(extra_vars_list) > 0:
        data['extra_vars'] = parser.process_extra_vars(extra_vars_list,
                                                       force_json=True)

    # Create the new job in Ansible Tower.
    start_data = {}
    endpoint = '/job_templates/%d/launch/' % jt['id']
    if 'extra_vars' in data and len(data['extra_vars']) > 0:
        start_data['extra_vars'] = data['extra_vars']
    if tags:
        start_data['job_tags'] = data['job_tags']
    for resource in PROMPT_LIST:
        if resource in modified:
            start_data[resource] = data[resource]

    # There's a non-trivial chance that we are going to need some
    # additional information to start the job; in particular, many jobs
    # rely on passwords entered at run-time.
    #
    # If there are any such passwords on this job, ask for them now.
    debug.log('Asking for information necessary to start the job.',
              header='details')
    job_start_info = client.get(endpoint).json()
    for password in job_start_info.get('passwords_needed_to_start', []):
        start_data[password] = getpass('Password for %s: ' % password)

    # Actually start the job.
    debug.log('Launching the job.', header='details')
    self._pop_none(kwargs)
    kwargs.update(start_data)
    job_started = client.post(endpoint, data=kwargs)

    # Get the job ID from the result.
    job_id = job_started.json()['id']

    # If returning json indicates any ignored fields, display it in
    # verbose mode.
    if job_started.text == '':
        ignored_fields = {}
    else:
        ignored_fields = job_started.json().get('ignored_fields', {})
    has_ignored_fields = False
    for key, value in ignored_fields.items():
        if value and value != '{}':
            if not has_ignored_fields:
                debug.log('List of ignored fields on the server side:',
                          header='detail')
                has_ignored_fields = True
            debug.log('{0}: {1}'.format(key, value))

    # Get some information about the running job to print
    result = self.status(pk=job_id, detail=True)
    result['changed'] = True

    # If we were told to monitor the job once it started, then call
    # monitor from here.
    if monitor:
        return self.monitor(job_id, timeout=timeout)
    elif wait:
        return self.wait(job_id, timeout=timeout)

    return result