def execute_init(self):
    """
    Executes the init action, which creates the skeleton framework
    of a role that complies with the galaxy metadata format.

    Raises AnsibleOptionsError when no role name was given, and
    AnsibleError when the target path exists and --force was not used.
    """
    init_path = self.get_opt('init_path', './')
    force = self.get_opt('force', False)
    offline = self.get_opt('offline', False)

    role_name = self.args.pop(0).strip() if self.args else None
    if not role_name:
        raise AnsibleOptionsError("- no role name specified for init")
    role_path = os.path.join(init_path, role_name)
    if os.path.exists(role_path):
        if os.path.isfile(role_path):
            raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
        elif not force:
            raise AnsibleError("- the directory %s already exists."
                               "you can use --force to re-initialize this directory,\n"
                               "however it will reset any main.yml files that may have\n"
                               "been modified there already." % role_path)

    # platform list is only fetched when online; offline init simply
    # renders an empty platforms section
    platforms = []
    if not offline:
        platforms = self.api.get_list("platforms") or []

    # group the list of platforms from the api based
    # on their names, with the release field being
    # appended to a list of versions
    platform_groups = defaultdict(list)
    for p in platforms:
        platform_groups[p['name']].append(p['release'])
    # FIX: sort each version list once after grouping; the original
    # called .sort() inside the loop, re-sorting on every append
    for versions in platform_groups.values():
        versions.sort()

    # values substituted into the skeleton's .j2 templates
    inject_data = dict(
        role_name=role_name,
        author='your name',
        description='your description',
        company='your company (optional)',
        license='license (GPLv2, CC-BY, etc)',
        issue_tracker_url='http://example.com/issue/tracker',
        min_ansible_version='1.2',
        platforms=platform_groups,
        container_enabled=self.options.container_enabled,
    )

    # create role directory
    if not os.path.exists(role_path):
        os.makedirs(role_path)

    role_skeleton = self.galaxy.default_role_skeleton_path
    role_skeleton = os.path.expanduser(role_skeleton)
    template_env = Environment(loader=FileSystemLoader(role_skeleton))

    for root, dirs, files in os.walk(role_skeleton, topdown=True):
        rel_root = os.path.relpath(root, role_skeleton)
        in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'

        for f in files:
            filename, ext = os.path.splitext(f)
            if ext == ".j2" and not in_templates_dir:
                # render skeleton templates in place, dropping the .j2 suffix;
                # files under templates/ are copied verbatim for the role to use
                src_template = os.path.join(rel_root, f)
                dest_file = os.path.join(role_path, rel_root, filename)
                template_env.get_template(src_template).stream(inject_data).dump(dest_file)
            else:
                f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
                shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))

        for d in dirs:
            dir_path = os.path.join(role_path, rel_root, d)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)

    display.display("- %s was created successfully" % role_name)
def execute_install(self):
    """
    Executes the installation action. The args list contains the
    roles to be installed, unless -f was specified. The list of roles
    can be a name (which will be downloaded via the galaxy API and github),
    or it can be a local .tar.gz file.

    Returns 0; individual role failures are reported as warnings and
    routed through self.exit_without_ignore().
    """
    role_file = self.get_opt("role_file", None)

    if len(self.args) == 0 and role_file is None:
        # the user needs to specify one of either --role-file
        # or specify a single user/role name
        raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
    elif len(self.args) == 1 and role_file is not None:
        # using a role file is mutually exclusive of specifying
        # the role name on the command line
        raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")

    no_deps = self.get_opt("no_deps", False)
    force = self.get_opt('force', False)

    roles_left = []
    if role_file:
        try:
            # FIX: use a context manager so the file handle is closed even
            # when parsing raises; the original only closed it on success
            with open(role_file, 'r') as f:
                if role_file.endswith('.yaml') or role_file.endswith('.yml'):
                    try:
                        required_roles = yaml.safe_load(f.read())
                    except Exception:
                        raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)

                    if required_roles is None:
                        raise AnsibleError("No roles found in file: %s" % role_file)

                    for role in required_roles:
                        role = RoleRequirement.role_yaml_parse(role)
                        display.vvv('found role %s in yaml file' % str(role))
                        if 'name' not in role and 'scm' not in role:
                            raise AnsibleError("Must specify name or src for role")
                        roles_left.append(GalaxyRole(self.galaxy, **role))
                else:
                    display.deprecated("going forward only the yaml format will be supported")
                    # roles listed in a file, one per line
                    for rline in f.readlines():
                        if rline.startswith("#") or rline.strip() == '':
                            continue
                        display.debug('found role %s in text file' % str(rline))
                        role = RoleRequirement.role_yaml_parse(rline.strip())
                        roles_left.append(GalaxyRole(self.galaxy, **role))
        except (IOError, OSError) as e:
            display.error('Unable to open %s: %s' % (role_file, str(e)))
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        for rname in self.args:
            roles_left.append(GalaxyRole(self.galaxy, rname.strip()))

    # NOTE: roles_left grows while we iterate it — dependencies appended
    # below are picked up by later iterations of this same loop
    for role in roles_left:
        display.vvv('Installing role %s ' % role.name)
        # query the galaxy API for the role data
        if role.install_info is not None and not force:
            display.display('- %s is already installed, skipping.' % role.name)
            continue

        try:
            installed = role.install()
        except AnsibleError as e:
            display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
            self.exit_without_ignore()
            continue

        # install dependencies, if we want them
        if not no_deps and installed:
            role_dependencies = role.metadata.get('dependencies') or []
            for dep in role_dependencies:
                display.debug('Installing dep %s' % dep)
                dep_req = RoleRequirement()
                dep_info = dep_req.role_yaml_parse(dep)
                dep_role = GalaxyRole(self.galaxy, **dep_info)
                if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                    # we know we can skip this, as it's not going to
                    # be found on galaxy.ansible.com
                    continue
                if dep_role.install_info is None or force:
                    if dep_role not in roles_left:
                        display.display('- adding dependency: %s' % dep_role.name)
                        roles_left.append(dep_role)
                    else:
                        display.display('- dependency %s already pending installation.' % dep_role.name)
                else:
                    display.display('- dependency %s is already installed, skipping.' % dep_role.name)

        if not installed:
            display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()

    return 0
def execute_init(self):
    """
    Creates the skeleton framework of a role that complies with the
    galaxy metadata format.
    """
    init_path = self.options.init_path
    force = self.options.force
    role_skeleton = self.options.role_skeleton

    role_name = self.args.pop(0).strip() if self.args else None
    if not role_name:
        raise AnsibleOptionsError("- no role name specified for init")
    role_path = os.path.join(init_path, role_name)

    if os.path.exists(role_path):
        if os.path.isfile(role_path):
            raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
        elif not force:
            raise AnsibleError("- the directory %s already exists."
                               "you can use --force to re-initialize this directory,\n"
                               "however it will reset any main.yml files that may have\n"
                               "been modified there already." % role_path)

    # placeholder values rendered into the skeleton's .j2 templates
    inject_data = dict(
        role_name=role_name,
        author='your name',
        description='your description',
        company='your company (optional)',
        license='license (GPLv2, CC-BY, etc)',
        issue_tracker_url='http://example.com/issue/tracker',
        min_ansible_version='2.4',
        role_type=self.options.role_type,
    )

    # make sure the role directory itself exists before walking the skeleton
    if not os.path.exists(role_path):
        os.makedirs(role_path)

    # a user-supplied skeleton honours the configured ignore list; the
    # built-in default only skips its .git_keep placeholder files
    if role_skeleton is not None:
        skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
    else:
        role_skeleton = self.galaxy.default_role_skeleton_path
        skeleton_ignore_expressions = ['^.*/.git_keep$']

    role_skeleton = os.path.expanduser(role_skeleton)
    ignore_patterns = [re.compile(expr) for expr in skeleton_ignore_expressions]

    template_env = Environment(loader=FileSystemLoader(role_skeleton))

    for root, dirs, files in os.walk(role_skeleton, topdown=True):
        rel_dir = os.path.relpath(root, role_skeleton)
        in_templates_dir = rel_dir.split(os.sep, 1)[0] == 'templates'
        # prune ignored directories in place so os.walk never descends into them
        dirs[:] = [subdir for subdir in dirs
                   if not any(pat.match(subdir) for pat in ignore_patterns)]

        for fname in files:
            basename, ext = os.path.splitext(fname)
            if any(pat.match(os.path.join(rel_dir, fname)) for pat in ignore_patterns):
                continue
            if ext == ".j2" and not in_templates_dir:
                # render skeleton templates, dropping the .j2 suffix; files under
                # templates/ are copied verbatim so the role can use them itself
                src_template = os.path.join(rel_dir, fname)
                dest_file = os.path.join(role_path, rel_dir, basename)
                template_env.get_template(src_template).stream(inject_data).dump(dest_file)
            else:
                rel_file = os.path.relpath(os.path.join(root, fname), role_skeleton)
                shutil.copyfile(os.path.join(root, fname), os.path.join(role_path, rel_file))

        for subdir in dirs:
            dir_path = os.path.join(role_path, rel_dir, subdir)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)

    display.display("- %s was created successfully" % role_name)
def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None, direct=None):
    ''' Given a config key figure out the actual value and report on the origin of the settings

    :param config: name of the configuration entry to resolve
    :param cfile: config file path to consult; defaults to self._config_file
    :param plugin_type: plugin type the entry belongs to, if any
    :param plugin_name: plugin name the entry belongs to, if any
    :param keys: playbook keywords currently in effect
    :param variables: variable overrides from a running play
    :param direct: settings passed directly as plugin arguments (highest precedence)
    :returns: (value, origin) tuple, where origin describes which source won
    :raises AnsibleError: if the entry is undefined, or required with no value
    :raises AnsibleOptionsError: if the resolved value has the wrong type
    '''
    if cfile is None:
        # use default config
        cfile = self._config_file

    # Note: sources that are lists listed in low to high precedence (last one wins)
    value = None
    origin = None

    defs = self.get_configuration_definitions(plugin_type, plugin_name)
    if config in defs:

        aliases = defs[config].get('aliases', [])

        # direct setting via plugin arguments, can set to None so we bypass rest of processing/defaults
        direct_aliases = []
        if direct:
            direct_aliases = [direct[alias] for alias in aliases if alias in direct]
        if direct and config in direct:
            value = direct[config]
            origin = 'Direct'
        elif direct and direct_aliases:
            value = direct_aliases[0]
            origin = 'Direct'
        else:
            # Use 'variable overrides' if present, highest precedence, but only present when querying running play
            if variables and defs[config].get('vars'):
                value, origin = self._loop_entries(variables, defs[config]['vars'])
                origin = 'var: %s' % origin

            # use playbook keywords if you have em
            if value is None and keys:
                if config in keys:
                    value = keys[config]
                    keyword = config
                elif aliases:
                    # fall back to the first alias that appears in the keywords
                    for alias in aliases:
                        if alias in keys:
                            value = keys[alias]
                            keyword = alias
                            break

                if value is not None:
                    origin = 'keyword: %s' % keyword

            # env vars are next precedence
            if value is None and defs[config].get('env'):
                value, origin = self._loop_entries(py3compat.environ, defs[config]['env'])
                origin = 'env: %s' % origin

            # try config file entries next, if we have one
            if self._parsers.get(cfile, None) is None:
                # lazily parse and cache the config file on first use
                self._parse_config_file(cfile)

            if value is None and cfile is not None:
                ftype = get_config_type(cfile)
                if ftype and defs[config].get(ftype):
                    if ftype == 'ini':
                        # load from ini config
                        try:  # FIXME: generalize _loop_entries to allow for files also, most of this code is dupe
                            for ini_entry in defs[config]['ini']:
                                temp_value = get_ini_config_value(self._parsers[cfile], ini_entry)
                                if temp_value is not None:
                                    # set value and origin; later entries in the list override earlier ones
                                    value = temp_value
                                    origin = cfile
                                    if 'deprecated' in ini_entry:
                                        self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated']))
                        except Exception as e:
                            sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
                    elif ftype == 'yaml':
                        # FIXME: implement, also , break down key from defs (. notation???)
                        origin = cfile

            # set default if we got here w/o a value
            if value is None:
                if defs[config].get('required', False):
                    if not plugin_type or config not in INTERNAL_DEFS.get(plugin_type, {}):
                        raise AnsibleError("No setting was provided for required configuration %s" %
                                           to_native(_get_entry(plugin_type, plugin_name, config)))
                else:
                    value = defs[config].get('default')
                    origin = 'default'
                    # skip typing as this is a templated default that will be resolved later in constants, which has needed vars
                    if plugin_type is None and isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')):
                        return value, origin

        # ensure correct type, can raise exceptions on mismatched types
        try:
            value = ensure_type(value, defs[config].get('type'), origin=origin)
        except ValueError as e:
            if origin.startswith('env:') and value == '':
                # this is empty env var for non string so we can set to default
                origin = 'default'
                value = ensure_type(defs[config].get('default'), defs[config].get('type'), origin=origin)
            else:
                raise AnsibleOptionsError('Invalid type for configuration option %s: %s' %
                                          (to_native(_get_entry(plugin_type, plugin_name, config)), to_native(e)))

        # deal with deprecation of the setting
        if 'deprecated' in defs[config] and origin != 'default':
            self.DEPRECATED.append((config, defs[config].get('deprecated')))
    else:
        raise AnsibleError('Requested entry (%s) was not defined in configuration.' %
                           to_native(_get_entry(plugin_type, plugin_name, config)))

    return value, origin
def parse(self, inventory, loader, path, cache=True):
    """Populate the in-memory inventory from a Tower/AWX server."""
    super(InventoryModule, self).parse(inventory, loader, path)
    if not self.no_config_file_supplied and os.path.isfile(path):
        self._read_config_data(path)

    # Read inventory from tower server.
    # Note the environment variables will be handled automatically by InventoryManager.
    tower_host = self.get_option('host')
    if not re.match('(?:http|https)://', tower_host):
        tower_host = 'https://{tower_host}'.format(tower_host=tower_host)

    request_handler = Request(
        url_username=self.get_option('username'),
        url_password=self.get_option('password'),
        force_basic_auth=True,
        validate_certs=self.get_option('validate_certs'),
    )

    # validate type of inventory_id because we allow two types as special case
    inventory_id = self.get_option('inventory_id')
    if isinstance(inventory_id, int):
        inventory_id = to_text(inventory_id, nonstring='simplerepr')
    else:
        try:
            inventory_id = ensure_type(inventory_id, 'str')
        except ValueError as e:
            raise AnsibleOptionsError(
                'Invalid type for configuration option inventory_id, '
                'not integer, and cannot convert to string: {err}'.format(err=to_native(e)))
    inventory_id = inventory_id.replace('/', '')

    inventory_url = '/api/v2/inventories/{inv_id}/script/?hostvars=1&towervars=1&all=1'.format(inv_id=inventory_id)
    inventory_url = urljoin(tower_host, inventory_url)

    # NOTE: use a distinct name for the fetched data so it does not
    # shadow the 'inventory' parameter passed in above
    tower_data = self.make_request(request_handler, inventory_url)

    # To start with, create all the groups.
    for group_name in tower_data:
        if group_name != '_meta':
            self.inventory.add_group(group_name)

    # Then, create all hosts and add the host vars.
    all_hosts = tower_data['_meta']['hostvars']
    for host_name, host_vars in six.iteritems(all_hosts):
        self.inventory.add_host(host_name)
        for var_name, var_value in six.iteritems(host_vars):
            self.inventory.set_variable(host_name, var_name, var_value)

    # Lastly, create to group-host and group-group relationships, and set group vars.
    for group_name, group_content in six.iteritems(tower_data):
        if group_name not in ('all', '_meta'):
            # First add hosts to groups
            for host_name in group_content.get('hosts', []):
                self.inventory.add_host(host_name, group_name)
            # Then add the parent-children group relationships.
            for child_group_name in group_content.get('children', []):
                self.inventory.add_child(group_name, child_group_name)
        # Set the group vars. Note we should set group var for 'all', but not '_meta'.
        if group_name != '_meta':
            for var_name, var_value in six.iteritems(group_content.get('vars', {})):
                self.inventory.set_variable(group_name, var_name, var_value)

    # Fetch extra variables if told to do so
    if self.get_option('include_metadata'):
        config_url = urljoin(tower_host, '/api/v2/config/')
        config_data = self.make_request(request_handler, config_url)
        server_data = {}
        server_data['license_type'] = config_data.get('license_info', {}).get('license_type', 'unknown')
        for key in ('version', 'ansible_version'):
            server_data[key] = config_data.get(key, 'unknown')
        self.inventory.set_variable('all', 'tower_metadata', server_data)

    # Clean up the inventory.
    self.inventory.reconcile_inventory()
def _file_transport_command(self, in_path, out_path, sftp_action):
    """
    Transfer a file to or from the remote host, trying sftp/scp/piped-dd
    in turn depending on configuration.

    :param in_path: source path ('get' = remote, otherwise local)
    :param out_path: destination path
    :param sftp_action: 'get' or 'put'
    :returns: (returncode, stdout, stderr) of the first successful method
    :raises AnsibleOptionsError: on an invalid transfer-method setting
    :raises AnsibleConnectionFailure: when the connection itself failed (rc 255)
    :raises AnsibleError: when every configured method failed
    """
    # scp and sftp require square brackets for IPv6 addresses, but
    # accept them for hostnames and IPv4 addresses too.
    host = '[%s]' % self.host

    # Transfer methods to try
    methods = []

    # Use the transfer_method option if set, otherwise use scp_if_ssh
    ssh_transfer_method = self._play_context.ssh_transfer_method
    if ssh_transfer_method is not None:
        if not (ssh_transfer_method in ('smart', 'sftp', 'scp', 'piped')):
            raise AnsibleOptionsError('transfer_method needs to be one of [smart|sftp|scp|piped]')
        if ssh_transfer_method == 'smart':
            methods = ['sftp', 'scp', 'piped']
        else:
            methods = [ssh_transfer_method]
    else:
        # since this can be a non-bool now, we need to handle it correctly
        scp_if_ssh = C.DEFAULT_SCP_IF_SSH
        if not isinstance(scp_if_ssh, bool):
            scp_if_ssh = scp_if_ssh.lower()
            if scp_if_ssh in BOOLEANS:
                scp_if_ssh = boolean(scp_if_ssh, strict=False)
            elif scp_if_ssh != 'smart':
                raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
        if scp_if_ssh == 'smart':
            methods = ['sftp', 'scp', 'piped']
        elif scp_if_ssh is True:
            methods = ['scp']
        else:
            methods = ['sftp']

    for method in methods:
        returncode = stdout = stderr = None
        if method == 'sftp':
            cmd = self._build_command(self.get_option('sftp_executable'), to_bytes(host))
            in_data = u"{0} {1} {2}\n".format(sftp_action, shlex_quote(in_path), shlex_quote(out_path))
            in_data = to_bytes(in_data, nonstring='passthru')
            (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
        elif method == 'scp':
            scp = self.get_option('scp_executable')
            if sftp_action == 'get':
                cmd = self._build_command(scp, u'{0}:{1}'.format(host, shlex_quote(in_path)), out_path)
            else:
                cmd = self._build_command(scp, in_path, u'{0}:{1}'.format(host, shlex_quote(out_path)))
            in_data = None
            (returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
        elif method == 'piped':
            if sftp_action == 'get':
                # we pass sudoable=False to disable pty allocation, which
                # would end up mixing stdout/stderr and screwing with newlines
                (returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
                # FIX: use a context manager so the local file handle is
                # closed even if the write raises (original leaked it)
                with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
                    out_file.write(stdout)
            else:
                # FIX: the original used open(...).read(), leaking the
                # file handle; close it deterministically instead
                with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
                    in_data = in_file.read()
                in_data = to_bytes(in_data, nonstring='passthru')
                (returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), in_data=in_data, sudoable=False)

        # Check the return code and rollover to next method if failed
        if returncode == 0:
            return (returncode, stdout, stderr)
        else:
            # If not in smart mode, the data will be printed by the raise below
            if len(methods) > 1:
                display.warning(msg='%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
                display.debug(msg='%s' % to_native(stdout))
                display.debug(msg='%s' % to_native(stderr))

    if returncode == 255:
        raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
    else:
        raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
                           (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None):
    """
    Takes a pattern or list of patterns and returns a list of matching
    inventory host names, taking into account any active restrictions
    or applied subsets

    :param pattern: host pattern string, or a list of pattern strings
    :param ignore_limits: if True, do not apply the --limit subset
    :param ignore_restrictions: if True, do not apply active restrictions
    :param order: one of None, 'inventory', 'sorted', 'reverse_sorted',
        'reverse_inventory', 'shuffle'
    :raises AnsibleOptionsError: on an unrecognised order value
    """
    hosts = []

    # Check if pattern already computed
    if isinstance(pattern, list):
        pattern_list = pattern[:]
    else:
        pattern_list = [pattern]

    if pattern_list:
        # fold the subset and restriction into the cache key so different
        # limit/restriction states do not collide in the cache
        if not ignore_limits and self._subset:
            pattern_list.extend(self._subset)

        if not ignore_restrictions and self._restriction:
            pattern_list.extend(self._restriction)

        # This is only used as a hash key in the self._hosts_patterns_cache dict
        # a tuple is faster than stringifying
        pattern_hash = tuple(pattern_list)

        if pattern_hash not in self._hosts_patterns_cache:

            patterns = split_host_pattern(pattern)
            hosts = self._evaluate_patterns(patterns)

            # mainly useful for hostvars[host] access
            if not ignore_limits and self._subset:
                # exclude hosts not in a subset, if defined
                subset_uuids = set(s._uuid for s in self._evaluate_patterns(self._subset))
                hosts = [h for h in hosts if h._uuid in subset_uuids]

            if not ignore_restrictions and self._restriction:
                # exclude hosts mentioned in any restriction (ex: failed hosts)
                hosts = [h for h in hosts if h.name in self._restriction]

            self._hosts_patterns_cache[pattern_hash] = deduplicate_list(hosts)

        # sort hosts list if needed (should only happen when called from strategy)
        if order in ['sorted', 'reverse_sorted']:
            hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted'))
        elif order == 'reverse_inventory':
            hosts = self._hosts_patterns_cache[pattern_hash][::-1]
        else:
            # return a copy so callers cannot mutate the cached list
            hosts = self._hosts_patterns_cache[pattern_hash][:]
            if order == 'shuffle':
                shuffle(hosts)
            elif order not in [None, 'inventory']:
                raise AnsibleOptionsError("Invalid 'order' specified for inventory hosts: %s" % order)

    return hosts
def run(self):
    ''' create and execute the single task playbook

    Builds an in-memory one-play playbook from the command-line module,
    arguments and host pattern, then runs it through a TaskQueueManager.
    Returns the TQM run result, or 0 when only listing hosts.
    '''

    super(AdHocCLI, self).run()

    # only thing left should be host pattern
    pattern = to_text(self.args[0], errors='surrogate_or_strict')

    sshpass = None
    becomepass = None

    self.normalize_become_options()
    (sshpass, becomepass) = self.ask_passwords()
    passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

    # dynamically load any plugins
    get_all_plugin_loaders()

    loader, inventory, variable_manager = self._play_prereqs(self.options)

    try:
        hosts = CLI.get_host_list(inventory, self.options.subset, pattern)
    except AnsibleError:
        # an explicit --limit that matches nothing is an error; an empty
        # match without one is just a warning
        if self.options.subset:
            raise
        else:
            hosts = []
            display.warning("No hosts matched, nothing to do")

    if self.options.listhosts:
        display.display(' hosts (%d):' % len(hosts))
        for host in hosts:
            display.display(' %s' % host)
        return 0

    if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
        err = "No argument passed to %s module" % self.options.module_name
        if pattern.endswith(".yml"):
            err = err + ' (did you mean to run ansible-playbook?)'
        raise AnsibleOptionsError(err)

    play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval)
    play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)

    # used in start callback
    playbook = Playbook(loader)
    playbook._entries.append(play)
    playbook._file_name = '__adhoc_playbook__'

    # choose the stdout callback: explicit > --one-line > configured > minimal
    if self.callback:
        cb = self.callback
    elif self.options.one_line:
        cb = 'oneline'
    # Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
    elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
        cb = C.DEFAULT_STDOUT_CALLBACK
    else:
        cb = 'minimal'

    run_tree = False
    if self.options.tree:
        C.DEFAULT_CALLBACK_WHITELIST.append('tree')
        C.TREE_DIR = self.options.tree
        run_tree = True

    # now create a task queue manager to execute the play
    self._tqm = None
    try:
        self._tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=self.options,
            passwords=passwords,
            stdout_callback=cb,
            run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
            run_tree=run_tree,
        )

        self._tqm.send_callback('v2_playbook_on_start', playbook)

        result = self._tqm.run(play)

        self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
    finally:
        # always release TQM resources and temp files, even on error
        if self._tqm:
            self._tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()

    return result
def run(self):
    ''' use Runner lib to do SSH things

    Runs ansible with a VCS module to check out (or update) the
    repository, then shells out to ansible-playbook on the selected
    playbook. Returns the ansible-playbook exit code (or the checkout
    rc / 0 on early exit).
    '''

    super(PullCLI, self).run()

    # log command line
    now = datetime.datetime.now()
    display.display(now.strftime("Starting Ansible Pull at %F %T"))
    display.display(' '.join(sys.argv))

    # Build Checkout command
    # Now construct the ansible command
    node = platform.node()
    host = socket.getfqdn()
    # limit to the local machine under any of its names
    limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
    base_opts = '-c local '
    if context.CLIARGS['verbosity'] > 0:
        base_opts += ' -%s' % ''.join(["v" for x in range(0, context.CLIARGS['verbosity'])])

    # Attempt to use the inventory passed in as an argument
    # It might not yet have been downloaded so use localhost as default
    inv_opts = self._get_inv_cli()
    if not inv_opts:
        inv_opts = " -i localhost, "
        # avoid interpreter discovery since we already know which interpreter to use on localhost
        inv_opts += '-e %s ' % shlex_quote('ansible_python_interpreter=%s' % sys.executable)

    # SCM specific options
    if context.CLIARGS['module_name'] == 'git':
        repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
        if context.CLIARGS['checkout']:
            repo_opts += ' version=%s' % context.CLIARGS['checkout']

        if context.CLIARGS['accept_host_key']:
            repo_opts += ' accept_hostkey=yes'

        if context.CLIARGS['private_key_file']:
            repo_opts += ' key_file=%s' % context.CLIARGS['private_key_file']

        if context.CLIARGS['verify']:
            repo_opts += ' verify_commit=yes'

        if context.CLIARGS['tracksubs']:
            repo_opts += ' track_submodules=yes'

        if not context.CLIARGS['fullclone']:
            repo_opts += ' depth=1'
    elif context.CLIARGS['module_name'] == 'subversion':
        repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
        if context.CLIARGS['checkout']:
            repo_opts += ' revision=%s' % context.CLIARGS['checkout']
        if not context.CLIARGS['fullclone']:
            repo_opts += ' export=yes'
    elif context.CLIARGS['module_name'] == 'hg':
        repo_opts = "repo=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
        if context.CLIARGS['checkout']:
            repo_opts += ' revision=%s' % context.CLIARGS['checkout']
    elif context.CLIARGS['module_name'] == 'bzr':
        repo_opts = "name=%s dest=%s" % (context.CLIARGS['url'], context.CLIARGS['dest'])
        if context.CLIARGS['checkout']:
            repo_opts += ' version=%s' % context.CLIARGS['checkout']
    else:
        raise AnsibleOptionsError('Unsupported (%s) SCM module for pull, choices are: %s'
                                  % (context.CLIARGS['module_name'], ','.join(self.REPO_CHOICES)))

    # options common to all supported SCMS
    if context.CLIARGS['clean']:
        repo_opts += ' force=yes'

    path = module_loader.find_plugin(context.CLIARGS['module_name'])
    if path is None:
        raise AnsibleOptionsError(("module '%s' not found.\n" % context.CLIARGS['module_name']))

    bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # hardcode local and inventory/host as this is just meant to fetch the repo
    cmd = '%s/ansible %s %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts,
                                                          context.CLIARGS['module_name'], repo_opts, limit_opts)
    for ev in context.CLIARGS['extra_vars']:
        cmd += ' -e %s' % shlex_quote(ev)

    # Nap?
    if context.CLIARGS['sleep']:
        display.display("Sleeping for %d seconds..." % context.CLIARGS['sleep'])
        time.sleep(context.CLIARGS['sleep'])

    # RUN the Checkout command
    display.debug("running ansible with VCS module to checkout repo")
    display.vvvv('EXEC: %s' % cmd)
    rc, b_out, b_err = run_cmd(cmd, live=True)

    if rc != 0:
        if context.CLIARGS['force']:
            display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
        else:
            return rc
    elif context.CLIARGS['ifchanged'] and b'"changed": true' not in b_out:
        display.display("Repository has not changed, quitting.")
        return 0

    playbook = self.select_playbook(context.CLIARGS['dest'])
    if playbook is None:
        raise AnsibleOptionsError("Could not find a playbook to run.")

    # Build playbook command
    cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
    if context.CLIARGS['vault_password_files']:
        for vault_password_file in context.CLIARGS['vault_password_files']:
            cmd += " --vault-password-file=%s" % vault_password_file
    if context.CLIARGS['vault_ids']:
        for vault_id in context.CLIARGS['vault_ids']:
            cmd += " --vault-id=%s" % vault_id

    for ev in context.CLIARGS['extra_vars']:
        cmd += ' -e %s' % shlex_quote(ev)
    if context.CLIARGS['become_ask_pass']:
        cmd += ' --ask-become-pass'
    if context.CLIARGS['skip_tags']:
        cmd += ' --skip-tags "%s"' % to_native(u','.join(context.CLIARGS['skip_tags']))
    if context.CLIARGS['tags']:
        cmd += ' -t "%s"' % to_native(u','.join(context.CLIARGS['tags']))
    if context.CLIARGS['subset']:
        cmd += ' -l "%s"' % context.CLIARGS['subset']
    else:
        cmd += ' -l "%s"' % limit_opts
    if context.CLIARGS['check']:
        cmd += ' -C'
    if context.CLIARGS['diff']:
        cmd += ' -D'

    os.chdir(context.CLIARGS['dest'])

    # redo inventory options as new files might exist now
    inv_opts = self._get_inv_cli()
    if inv_opts:
        cmd += inv_opts

    # RUN THE PLAYBOOK COMMAND
    display.debug("running ansible-playbook to do actual work")
    display.debug('EXEC: %s' % cmd)
    rc, b_out, b_err = run_cmd(cmd, live=True)

    if context.CLIARGS['purge']:
        # leave the checkout dir before deleting it
        os.chdir('/')
        try:
            shutil.rmtree(context.CLIARGS['dest'])
        except Exception as e:
            display.error(u"Failed to remove %s: %s" % (context.CLIARGS['dest'], to_text(e)))

    return rc
def execute_create(self):
    """
    Create and open a new vault-encrypted file in the editor.

    :raises AnsibleOptionsError: when not exactly one filename argument
        was supplied.
    """
    # FIX: the original only rejected more than one argument; with zero
    # arguments it crashed with an IndexError on self.args[0] instead of
    # producing a usable error message
    if len(self.args) != 1:
        raise AnsibleOptionsError("ansible-vault create can take only one filename argument")

    self.editor.create_file(self.args[0])
def parse(self):
    """Build the option parser for ansible-vault and validate the parsed args."""
    self.parser = CLI.base_parser(
        vault_opts=True,
        usage="usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
        epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
    )

    self.set_action()

    # per-action usage strings; most actions only differ in the usage line
    usage_by_action = {
        "create": "usage: %prog create [options] file_name",
        "decrypt": "usage: %prog decrypt [options] file_name",
        "edit": "usage: %prog edit [options] file_name",
        "view": "usage: %prog view [options] file_name",
        "encrypt": "usage: %prog encrypt [options] file_name",
        "rekey": "usage: %prog rekey [options] file_name",
    }
    if self.action in usage_by_action:
        self.parser.set_usage(usage_by_action[self.action])
    elif self.action == "encrypt_string":
        # encrypt_string additionally grows its own options
        self.parser.add_option('-p', '--prompt', dest='encrypt_string_prompt',
                               action='store_true',
                               help="Prompt for the string to encrypt")
        self.parser.add_option('-n', '--name', dest='encrypt_string_names',
                               action='append',
                               help="Specify the variable name")
        self.parser.add_option('--stdin-name', dest='encrypt_string_stdin_name',
                               default=None,
                               help="Specify the variable name for stdin")
        self.parser.set_usage("usage: %prog encrypt-string [--prompt] [options] string_to_encrypt")

    super(VaultCLI, self).parse()

    display.verbosity = self.options.verbosity

    can_output = ['encrypt', 'decrypt', 'encrypt_string']
    if self.action not in can_output:
        if self.options.output_file:
            raise AnsibleOptionsError("The --output option can be used only with ansible-vault %s" % '/'.join(can_output))
        if len(self.args) == 0:
            raise AnsibleOptionsError("Vault requires at least one filename as a parameter")
    else:
        # This restriction should remain in place until it's possible to
        # load multiple YAML records from a single file, or it's too easy
        # to create an encrypted file that can't be read back in. But in
        # the meanwhile, "cat a b c|ansible-vault encrypt --output x" is
        # a workaround.
        if self.options.output_file and len(self.args) > 1:
            raise AnsibleOptionsError("At most one input file may be used with the --output option")

    if self.action == 'encrypt_string':
        if '-' in self.args or len(self.args) == 0 or self.options.encrypt_string_stdin_name:
            self.encrypt_string_read_stdin = True

        # TODO: prompting from stdin and reading from stdin seem
        # mutually exclusive, but verify that.
        if self.options.encrypt_string_prompt and self.encrypt_string_read_stdin:
            raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
def parse(self):
    ''' create an options parser for bin/ansible '''
    self.parser = CLI.base_parser(
        usage='%prog <host-pattern> [options]',
        connect_opts=True,
        vault_opts=True,
        runtask_opts=True,
    )

    # options unique to pull
    self.parser.add_option('--purge', default=False, action='store_true',
                           help='purge checkout after playbook run')
    self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
                           help='only run the playbook if the repository has been updated')
    self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
                           help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests')
    self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
                           help='run the playbook even if the repository could not be updated')
    self.parser.add_option('-d', '--directory', dest='dest', default=None,
                           help='directory to checkout repository to')
    self.parser.add_option('-U', '--url', dest='url', default=None,
                           help='URL of the playbook repository')
    self.parser.add_option('-C', '--checkout', dest='checkout',
                           help='branch/tag/commit to checkout. '
                                'Defaults to behavior of repository module.')
    self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
                           help='adds the hostkey for the repo url if not already added')
    self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
                           help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE)
    self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true',
                           help='verify GPG signature of checked out commit, if it fails abort running the playbook.'
                                ' This needs the corresponding VCS module to support such an operation')

    self.options, self.args = self.parser.parse_args()

    # -s takes a max interval; pick a concrete random sleep now
    if self.options.sleep:
        try:
            secs = random.randint(0, int(self.options.sleep))
            self.options.sleep = secs
        except ValueError:
            raise AnsibleOptionsError("%s is not a number." % self.options.sleep)

    if not self.options.url:
        raise AnsibleOptionsError("URL for repository not specified, use -h for help")

    if len(self.args) != 1:
        raise AnsibleOptionsError("Missing target hosts")

    if self.options.module_name not in self.SUPPORTED_REPO_MODULES:
        # fixed typo in user-facing message: "Unsuported" -> "Unsupported"
        raise AnsibleOptionsError("Unsupported repo module %s, choices are %s"
                                  % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))

    self.display.verbosity = self.options.verbosity
    self.validate_conflicts(vault_opts=True)
def run(self):
    ''' use Runner lib to do SSH things '''
    super(PullCLI, self).run()

    # log command line
    now = datetime.datetime.now()
    self.display.display(now.strftime("Starting Ansible Pull at %F %T"))
    self.display.display(' '.join(sys.argv))

    # Build Checkout command
    # Now construct the ansible command
    limit_opts = 'localhost:%s:127.0.0.1' % socket.getfqdn()
    base_opts = '-c local "%s"' % limit_opts
    if self.options.verbosity > 0:
        base_opts += ' -%s' % ''.join(["v" for x in range(0, self.options.verbosity)])

    # Attempt to use the inventory passed in as an argument
    # It might not yet have been downloaded so use localhost if not
    # (comment typo fixed: was "if note")
    if not self.options.inventory or not os.path.exists(self.options.inventory):
        inv_opts = 'localhost,'
    else:
        inv_opts = self.options.inventory

    # TODO: enable more repo modules hg/svn?
    if self.options.module_name == 'git':
        repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
        if self.options.checkout:
            repo_opts += ' version=%s' % self.options.checkout
        if self.options.accept_host_key:
            repo_opts += ' accept_hostkey=yes'
        if self.options.private_key_file:
            repo_opts += ' key_file=%s' % self.options.private_key_file
        if self.options.verify:
            repo_opts += ' verify_commit=yes'

    path = module_loader.find_plugin(self.options.module_name)
    if path is None:
        raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))

    bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % (bin_path, inv_opts, base_opts,
                                                   self.options.module_name, repo_opts)

    for ev in self.options.extra_vars:
        cmd += ' -e "%s"' % ev

    # Nap?
    if self.options.sleep:
        self.display.display("Sleeping for %d seconds..." % self.options.sleep)
        time.sleep(self.options.sleep)

    # RUN the Checkout command
    rc, out, err = run_cmd(cmd, live=True)

    if rc != 0:
        if self.options.force:
            self.display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
        else:
            return rc
    elif self.options.ifchanged and '"changed": true' not in out:
        self.display.display("Repository has not changed, quitting.")
        return 0

    playbook = self.select_playbook(path)

    if playbook is None:
        raise AnsibleOptionsError("Could not find a playbook to run.")

    # Build playbook command
    cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
    if self.options.vault_password_file:
        cmd += " --vault-password-file=%s" % self.options.vault_password_file
    if self.options.inventory:
        cmd += ' -i "%s"' % self.options.inventory
    for ev in self.options.extra_vars:
        cmd += ' -e "%s"' % ev
    if self.options.ask_sudo_pass:
        cmd += ' -K'
    if self.options.tags:
        cmd += ' -t "%s"' % self.options.tags

    os.chdir(self.options.dest)

    # RUN THE PLAYBOOK COMMAND
    rc, out, err = run_cmd(cmd, live=True)

    if self.options.purge:
        os.chdir('/')
        try:
            shutil.rmtree(self.options.dest)
        except Exception as e:
            # fixed Python-3-incompatible "except Exception, e:" syntax
            self.display.error("Failed to remove %s: %s" % (self.options.dest, str(e)))
def run(self):
    """Resolve the requested plugin loader, then list plugins or render
    documentation for each plugin named on the command line."""
    super(DocCLI, self).run()

    plugin_type = self.options.type

    # choose plugin type
    if plugin_type == 'cache':
        loader = cache_loader
    elif plugin_type == 'callback':
        loader = callback_loader
    elif plugin_type == 'connection':
        loader = connection_loader
    elif plugin_type == 'lookup':
        loader = lookup_loader
    elif plugin_type == 'strategy':
        loader = strategy_loader
    elif plugin_type == 'inventory':
        loader = PluginLoader('InventoryModule', 'ansible.plugins.inventory', 'inventory_plugins', 'inventory_plugins')
    else:
        loader = module_loader

    # add to plugin path from command line
    if self.options.module_path is not None:
        for i in self.options.module_path.split(os.pathsep):
            loader.add_directory(i)

    # list plugins for type
    if self.options.list_dir:
        paths = loader._get_paths()
        for path in paths:
            self.find_plugins(path, plugin_type)
        self.pager(self.get_plugin_list_text(loader))
        return 0

    # process all plugins of type
    if self.options.all_plugins:
        paths = loader._get_paths()
        for path in paths:
            self.find_plugins(path, plugin_type)

    if len(self.args) == 0:
        raise AnsibleOptionsError("Incorrect options passed")

    # process command line list
    text = ''
    for plugin in self.args:
        try:
            # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
            filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True)
            if filename is None:
                display.warning("%s %s not found in %s\n" % (plugin_type, plugin, DocCLI.print_paths(loader)))
                continue

            if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
                continue

            try:
                doc, plainexamples, returndocs, metadata = plugin_docs.get_docstring(
                    filename, verbose=(self.options.verbosity > 0))
            except Exception:
                # was a bare "except:", which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed so those propagate
                display.vvv(traceback.format_exc())
                display.error("%s %s has a documentation error formatting or is missing documentation." % (plugin_type, plugin))
                continue

            if doc is not None:
                # assign from other sections
                doc['plainexamples'] = plainexamples
                doc['returndocs'] = returndocs
                doc['metadata'] = metadata

                # generate extra data
                if plugin_type == 'module':
                    # is there corresponding action plugin?
                    if plugin in action_loader:
                        doc['action'] = True
                    else:
                        doc['action'] = False

                doc['filename'] = filename
                doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
                doc['docuri'] = doc[plugin_type].replace('_', '-')

                if self.options.show_snippet and plugin_type == 'module':
                    text += self.get_snippet_text(doc)
                else:
                    text += self.get_man_text(doc)
            else:
                # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
                # probably a quoting issue.
                raise AnsibleError("Parsing produced an empty object.")
        except Exception as e:
            display.vvv(traceback.format_exc())
            raise AnsibleError("%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, str(e)))

    if text:
        self.pager(text)
    return 0
def run(self):
    ''' create and execute the single task playbook '''
    super(AdHocCLI, self).run()

    # only thing left should be host pattern
    pattern = to_text(context.CLIARGS['args'][0], errors='surrogate_or_strict')

    sshpass = None
    becomepass = None

    (sshpass, becomepass) = self.ask_passwords()
    passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

    # get basic objects
    loader, inventory, variable_manager = self._play_prereqs()

    try:
        hosts = self.get_host_list(inventory, context.CLIARGS['subset'], pattern)
    except AnsibleError:
        # with an explicit --limit a miss is fatal; otherwise just warn and
        # continue with an empty host list
        if context.CLIARGS['subset']:
            raise
        else:
            hosts = []
            display.warning("No hosts matched, nothing to do")

    # --list-hosts: print the matched hosts and stop
    if context.CLIARGS['listhosts']:
        display.display(' hosts (%d):' % len(hosts))
        for host in hosts:
            display.display(' %s' % host)
        return 0

    if context.CLIARGS['module_name'] in C.MODULE_REQUIRE_ARGS and not context.CLIARGS['module_args']:
        err = "No argument passed to %s module" % context.CLIARGS['module_name']
        if pattern.endswith(".yml"):
            err = err + ' (did you mean to run ansible-playbook?)'
        raise AnsibleOptionsError(err)

    # Avoid modules that don't work with ad-hoc
    if context.CLIARGS['module_name'] in ('import_playbook',):
        raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands"
                                  % context.CLIARGS['module_name'])

    # build a one-task play data structure and load it into a Play object
    play_ds = self._play_ds(pattern, context.CLIARGS['seconds'], context.CLIARGS['poll_interval'])
    play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)

    # used in start callback
    playbook = Playbook(loader)
    playbook._entries.append(play)
    playbook._file_name = '__adhoc_playbook__'

    # pick the stdout callback plugin: explicit -> one-line -> configured -> minimal
    if self.callback:
        cb = self.callback
    elif context.CLIARGS['one_line']:
        cb = 'oneline'
    # Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks'
    elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default':
        cb = C.DEFAULT_STDOUT_CALLBACK
    else:
        cb = 'minimal'

    run_tree = False
    if context.CLIARGS['tree']:
        # NOTE(review): mutates global config (whitelist + TREE_DIR) as a side effect
        C.DEFAULT_CALLBACK_WHITELIST.append('tree')
        C.TREE_DIR = context.CLIARGS['tree']
        run_tree = True

    # now create a task queue manager to execute the play
    self._tqm = None
    try:
        self._tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            passwords=passwords,
            stdout_callback=cb,
            run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
            run_tree=run_tree,
            forks=context.CLIARGS['forks'],
        )

        self._tqm.send_callback('v2_playbook_on_start', playbook)

        result = self._tqm.run(play)

        self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
    finally:
        # always release TQM resources and temp files, even on error
        if self._tqm:
            self._tqm.cleanup()
        if loader:
            loader.cleanup_all_tmp_files()

    return result
def run(self):
    """Set up vault secrets for the selected action, build the VaultEditor,
    and dispatch to the action's execute method."""
    super(VaultCLI, self).run()
    loader = DataLoader()

    # set default restrictive umask
    old_umask = os.umask(0o077)

    vault_ids = self.options.vault_ids

    # there are 3 types of actions, those that just 'read' (decrypt, view) and only
    # need to ask for a password once, and those that 'write' (create, encrypt) that
    # ask for a new password and confirm it, and 'read/write (rekey) that asks for the
    # old password, then asks for a new one and confirms it.

    default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
    vault_ids = default_vault_ids + vault_ids

    # TODO: instead of prompting for these before, we could let VaultEditor
    # call a callback when it needs it.
    if self.action in ['decrypt', 'view', 'rekey', 'edit']:
        vault_secrets = self.setup_vault_secrets(loader,
                                                 vault_ids=vault_ids,
                                                 vault_password_files=self.options.vault_password_files,
                                                 ask_vault_pass=self.options.ask_vault_pass)
        if not vault_secrets:
            raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")

    if self.action in ['encrypt', 'encrypt_string', 'create']:
        # writing actions only support a single encryption identity
        if len(vault_ids) > 1:
            raise AnsibleOptionsError("Only one --vault-id can be used for encryption")

        vault_secrets = None
        vault_secrets = \
            self.setup_vault_secrets(loader,
                                     vault_ids=vault_ids,
                                     vault_password_files=self.options.vault_password_files,
                                     ask_vault_pass=self.options.ask_vault_pass,
                                     create_new_password=True)

        if len(vault_secrets) > 1:
            raise AnsibleOptionsError("Only one --vault-id can be used for encryption. This includes passwords from configuration and cli.")

        if not vault_secrets:
            raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")

        encrypt_secret = match_encrypt_secret(vault_secrets)

        # only one secret for encrypt for now, use the first vault_id and use its first secret
        # self.encrypt_vault_id = list(vault_secrets.keys())[0]
        # self.encrypt_secret = vault_secrets[self.encrypt_vault_id][0]
        self.encrypt_vault_id = encrypt_secret[0]
        self.encrypt_secret = encrypt_secret[1]

    if self.action in ['rekey']:
        # rekey additionally needs the *new* secret to re-encrypt with
        new_vault_ids = []
        if self.options.new_vault_id:
            new_vault_ids.append(self.options.new_vault_id)

        new_vault_secrets = \
            self.setup_vault_secrets(loader,
                                     vault_ids=new_vault_ids,
                                     vault_password_files=self.options.new_vault_password_files,
                                     ask_vault_pass=self.options.ask_vault_pass,
                                     create_new_password=True)

        if not new_vault_secrets:
            raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")

        # There is only one new_vault_id currently and one new_vault_secret
        new_encrypt_secret = match_encrypt_secret(new_vault_secrets)

        self.new_encrypt_vault_id = new_encrypt_secret[0]
        self.new_encrypt_secret = new_encrypt_secret[1]

    loader.set_vault_secrets(vault_secrets)

    # FIXME: do we need to create VaultEditor here? its not reused
    vault = VaultLib(vault_secrets)
    self.editor = VaultEditor(vault)

    self.execute()

    # and restore umask
    os.umask(old_umask)
def execute_install(self):
    """
    Executes the installation action. The args list contains the
    roles to be installed, unless -f was specified. The list of roles
    can be a name (which will be downloaded via the galaxy API and github),
    or it can be a local .tar.gz file.
    """
    role_file = self.get_opt("role_file", None)

    if len(self.args) == 0 and role_file is None:
        # the user needs to specify one of either --role-file
        # or specify a single user/role name
        raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
    elif len(self.args) == 1 and role_file is not None:
        # using a role file is mutually exclusive of specifying
        # the role name on the command line
        raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")

    no_deps = self.get_opt("no_deps", False)  # kept: consumed by the (disabled) dependency handling
    roles_path = self.get_opt("roles_path")

    roles_left = []
    if role_file:
        # "with" guarantees the file handle is closed even if parsing raises
        with open(role_file, 'r') as f:
            if role_file.endswith('.yaml') or role_file.endswith('.yml'):
                # list comprehension instead of map() so roles_left is a real
                # list on both Python 2 and 3 (len()/pop() are used below)
                roles_left = [ansible.utils.role_yaml_parse(rrec) for rrec in yaml.safe_load(f)]
            else:
                # roles listed in a file, one per line
                for rname in f.readlines():
                    roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        for rname in self.args:
            roles_left.append(GalaxyRole(self.galaxy, rname.strip()))

    while len(roles_left) > 0:
        # query the galaxy API for the role data
        role_data = None
        role = roles_left.pop(0)
        role_path = role.path

        if role_path:
            self.options.roles_path = role_path
        else:
            self.options.roles_path = roles_path

        tmp_file = None
        installed = False
        if role.src and os.path.isfile(role.src):
            # installing a local tar.gz
            tmp_file = role.src
        else:
            if role.scm:
                # create tar file from scm url
                tmp_file = scm_archive_role(role.scm, role.src, role.version, role.name)
            if role.src:
                if '://' not in role.src:
                    role_data = self.api.lookup_role_by_name(role.src)
                    if not role_data:
                        self.display.warning("- sorry, %s was not found on %s." % (role.src, self.options.api_server))
                        self.exit_without_ignore()
                        continue

                    role_versions = self.api.fetch_role_related('versions', role_data['id'])
                    if not role.version:
                        # convert the version names to LooseVersion objects
                        # and sort them to get the latest version. If there
                        # are no versions in the list, we'll grab the head
                        # of the master branch
                        if len(role_versions) > 0:
                            loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
                            loose_versions.sort()
                            role.version = str(loose_versions[-1])
                        else:
                            role.version = 'master'
                    elif role.version != 'master':
                        if role_versions and role.version not in [a.get('name', None) for a in role_versions]:
                            self.display.warning('role is %s' % role)
                            self.display.warning("- the specified version (%s) was not found in the list of available versions (%s)."
                                                 % (role.version, role_versions))
                            self.exit_without_ignore()
                            continue

                # download the role. if --no-deps was specified, we stop here,
                # otherwise we recursively grab roles and all of their deps.
                tmp_file = role.fetch(role_data)

        if tmp_file:
            installed = role.install(tmp_file)
            # we're done with the temp file, clean it up
            if tmp_file != role.src:
                os.unlink(tmp_file)
            # TODO: install dependencies (honoring no_deps) once the new
            # role-dependencies code lands; the old commented-out prototype
            # was removed as dead code.

        if not tmp_file or not installed:
            self.display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()
    return 0
def execute_encrypt_string(self):
    ''' encrypt the supplied string using the provided vault secret '''
    b_plaintext = None

    # Holds tuples (the_text, the_source_of_the_string, the variable name if its provided).
    b_plaintext_list = []

    # remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
    # we don't add it to the plaintext list
    args = [x for x in self.args if x != '-']

    # We can prompt and read input, or read from stdin, but not both.
    if self.options.encrypt_string_prompt:
        msg = "String to encrypt: "

        name = None
        name_prompt_response = display.prompt('Variable name (enter for no name): ')

        # TODO: enforce var naming rules?
        if name_prompt_response != "":
            name = name_prompt_response

        # TODO: could prompt for which vault_id to use for each plaintext string
        # currently, it will just be the default
        # could use private=True for shadowed input if useful
        prompt_response = display.prompt(msg)

        if prompt_response == '':
            raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')

        b_plaintext = to_bytes(prompt_response)
        b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))

    # read from stdin
    if self.encrypt_string_read_stdin:
        # only show the hint when stdin is interactive; piped input needs no prompt
        if sys.stdout.isatty():
            display.display("Reading plaintext input from stdin. (ctrl-d to end input)", stderr=True)

        stdin_text = sys.stdin.read()
        if stdin_text == '':
            raise AnsibleOptionsError('stdin was empty, not encrypting')

        b_plaintext = to_bytes(stdin_text)

        # defaults to None
        name = self.options.encrypt_string_stdin_name
        b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))

    # use any leftover args as strings to encrypt
    # Try to match args up to --name options
    if hasattr(self.options, 'encrypt_string_names') and self.options.encrypt_string_names:
        name_and_text_list = list(zip(self.options.encrypt_string_names, args))

        # Some but not enough --name's to name each var
        if len(args) > len(name_and_text_list):
            # Trying to avoid ever showing the plaintext in the output, so this warning is vague to avoid that.
            display.display('The number of --name options do not match the number of args.', stderr=True)
            display.display('The last named variable will be "%s". The rest will not have names.'
                            % self.options.encrypt_string_names[-1], stderr=True)

        # Add the rest of the args without specifying a name
        for extra_arg in args[len(name_and_text_list):]:
            name_and_text_list.append((None, extra_arg))

    # if no --names are provided, just use the args without a name.
    else:
        name_and_text_list = [(None, x) for x in args]

    # Convert the plaintext text objects to bytestrings and collect
    for name_and_text in name_and_text_list:
        name, plaintext = name_and_text

        if plaintext == '':
            raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')

        b_plaintext = to_bytes(plaintext)
        b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))

    # TODO: specify vault_id per string?
    # Format the encrypted strings and any corresponding stderr output
    outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)

    # print the ciphertext to stdout; diagnostics go to stderr so stdout stays pipeable
    for output in outputs:
        err = output.get('err', None)
        out = output.get('out', '')
        if err:
            sys.stderr.write(err)
        print(out)

    if sys.stdout.isatty():
        display.display("Encryption successful", stderr=True)
def run(self): super(DocCLI, self).run() plugin_type = context.CLIARGS['type'] if plugin_type in C.DOCUMENTABLE_PLUGINS: loader = getattr(plugin_loader, '%s_loader' % plugin_type) else: raise AnsibleOptionsError( "Unknown or undocumentable plugin type: %s" % plugin_type) # add to plugin path from command line if context.CLIARGS['module_path']: for path in context.CLIARGS['module_path']: if path: loader.add_directory(path) # save only top level paths for errors search_paths = DocCLI.print_paths(loader) loader._paths = None # reset so we can use subdirs below # list plugins names and filepath for type if context.CLIARGS['list_files']: paths = loader._get_paths() for path in paths: self.plugin_list.update(DocCLI.find_plugins(path, plugin_type)) list_text = self.get_plugin_list_filenames(loader) DocCLI.pager(list_text) return 0 # list plugins for type if context.CLIARGS['list_dir']: paths = loader._get_paths() for path in paths: self.plugin_list.update(DocCLI.find_plugins(path, plugin_type)) DocCLI.pager(self.get_plugin_list_text(loader)) return 0 # dump plugin desc/metadata as JSON if context.CLIARGS['json_dump']: plugin_data = {} plugin_names = DocCLI.get_all_plugins_of_type(plugin_type) for plugin_name in plugin_names: plugin_info = DocCLI.get_plugin_metadata( plugin_type, plugin_name) if plugin_info is not None: plugin_data[plugin_name] = plugin_info DocCLI.pager(json.dumps(plugin_data, sort_keys=True, indent=4)) return 0 if len(context.CLIARGS['args']) == 0: raise AnsibleOptionsError("Incorrect options passed") # process command line list text = '' for plugin in context.CLIARGS['args']: textret = DocCLI.format_plugin_doc(plugin, loader, plugin_type, search_paths) if textret: text += textret if text: DocCLI.pager(text) return 0
def parse(self): self.parser = CLI.base_parser( usage= 'usage: %prog [-l|-F|-s] [options] [-t <plugin type> ] [plugin]', module_opts=True, desc="plugin documentation tool", epilog= "See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com" ) self.parser.add_option( "-F", "--list_files", action="store_true", default=False, dest="list_files", help= 'Show plugin names and their source files without summaries (implies --list)' ) self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir', help='List available plugins') self.parser.add_option( "-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified plugin(s)') self.parser.add_option( "-a", "--all", action="store_true", default=False, dest='all_plugins', help= '**For internal testing only** Show documentation for all plugins.' ) self.parser.add_option( "-j", "--json", action="store_true", default=False, dest='json_dump', help= '**For internal testing only** Dump json metadata for all plugins.' ) self.parser.add_option( "-t", "--type", action="store", default='module', dest='type', type='choice', help='Choose which plugin type (defaults to "module")', choices=C.DOCUMENTABLE_PLUGINS) super(DocCLI, self).parse() if [ self.options.all_plugins, self.options.json_dump, self.options.list_dir, self.options.list_files, self.options.show_snippet ].count(True) > 1: raise AnsibleOptionsError( "Only one of -l, -F, -s, -j or -a can be used at the same time." ) display.verbosity = self.options.verbosity
def get_hosts(self, pattern="all", ignore_limits=False, ignore_restrictions=False, order=None): """ Takes a pattern or list of patterns and returns a list of matching inventory host names, taking into account any active restrictions or applied subsets """ hosts = [] # Check if pattern already computed if isinstance(pattern, list): pattern_hash = u":".join(pattern) else: pattern_hash = pattern if pattern_hash: if not ignore_limits and self._subset: pattern_hash += u":%s" % to_text(self._subset, errors='surrogate_or_strict') if not ignore_restrictions and self._restriction: pattern_hash += u":%s" % to_text(self._restriction, errors='surrogate_or_strict') if pattern_hash not in self._hosts_patterns_cache: patterns = split_host_pattern(pattern) hosts = self._evaluate_patterns(patterns) # mainly useful for hostvars[host] access if not ignore_limits and self._subset: # exclude hosts not in a subset, if defined subset = self._evaluate_patterns(self._subset) hosts = [h for h in hosts if h in subset] if not ignore_restrictions and self._restriction: # exclude hosts mentioned in any restriction (ex: failed hosts) hosts = [h for h in hosts if h.name in self._restriction] seen = set() self._hosts_patterns_cache[pattern_hash] = [ x for x in hosts if x not in seen and not seen.add(x) ] # sort hosts list if needed (should only happen when called from strategy) if order in ['sorted', 'reverse_sorted']: from operator import attrgetter hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], key=attrgetter('name'), reverse=(order == 'reverse_sorted')) elif order == 'reverse_inventory': hosts = sorted(self._hosts_patterns_cache[pattern_hash][:], reverse=True) else: hosts = self._hosts_patterns_cache[pattern_hash][:] if order == 'shuffle': from random import shuffle shuffle(hosts) elif order not in [None, 'inventory']: AnsibleOptionsError( "Invalid 'order' specified for inventory hosts: %s" % order) return hosts
def run(self): super(DocCLI, self).run() if self.options.module_path is not None: for i in self.options.module_path.split(os.pathsep): module_loader.add_directory(i) # list modules if self.options.list_dir: paths = module_loader._get_paths() for path in paths: self.find_modules(path) CLI.pager(self.get_module_list_text()) return 0 if len(self.args) == 0: raise AnsibleOptionsError("Incorrect options passed") # process command line module list text = '' for module in self.args: try: filename = module_loader.find_plugin(module) if filename is None: self.display.warning( "module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue if any(filename.endswith(x) for x in self.BLACKLIST_EXTS): continue try: doc, plainexamples, returndocs = module_docs.get_docstring( filename) except: self.display.vvv(traceback.print_exc()) self.display.error( "module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module) continue if doc is not None: all_keys = [] for (k, v) in doc['options'].iteritems(): all_keys.append(k) all_keys = sorted(all_keys) doc['option_keys'] = all_keys doc['filename'] = filename doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime( '%Y-%m-%d') doc['plainexamples'] = plainexamples doc['returndocs'] = returndocs if self.options.show_snippet: text += DocCLI.get_snippet_text(doc) else: text += DocCLI.get_man_text(doc) else: # this typically means we couldn't even parse the docstring, not just that the YAML is busted, # probably a quoting issue. raise AnsibleError("Parsing produced an empty object.") except Exception, e: self.display.vvv(traceback.print_exc()) raise AnsibleError( "module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))
def run(self, tmp=None, task_vars=None):
    """Idempotently apply a Tasmota command to a device over HTTP.

    Reads the device's current value for ``command`` from the /cm
    endpoint and only issues a change request when it differs from the
    requested ``value``.  Returns the standard action-result dict with
    changed/command/raw_data/endpoint details.

    :param tmp: unused temp path passed through to the base class
    :param task_vars: task variables; ansible_check_mode gates writes
    :raises AnsibleAuthenticationFailure: on missing/invalid credentials
    :raises AnsibleRuntimeError: on unexpected HTTP status codes
    """
    if task_vars is None:
        task_vars = dict()

    # uncomment to enable request debugging
    # try:
    #     import http.client as http_client
    # except ImportError:
    #     # Python 2
    #     import httplib as http_client
    # http_client.HTTPConnection.debuglevel = 1
    # import logging
    # logLevel = logging.DEBUG
    # logging.basicConfig()
    # logging.getLogger().setLevel(logLevel)
    # requests_log = logging.getLogger("requests.packages.urllib3")
    # requests_log.setLevel(logLevel)
    # requests_log.propagate = True

    result = super(ActionModule, self).run(tmp, task_vars)
    self._task_vars = task_vars
    changed = False
    no_log = self._play_context.no_log
    if not no_log:
        display.v("args: %s" % (self._task.args))
    check_mode = task_vars['ansible_check_mode']
    display.v("check_mode: %s" % (check_mode))

    try:
        # Get the tasmota host
        tasmota_host = self._get_arg_or_var('tasmota_host', task_vars['ansible_host'])
        command = self._get_arg_or_var('command')
        incoming_value = self._get_arg_or_var('value', None, False)
        if incoming_value is None:
            # early return when incoming_value is not provided
            result["changed"] = False
            result["skipped"] = True
            return result
    except Exception as err:
        display.v("got an exception: %s" % (err))
        display.v("got an exception: " + err.message)
        return self._fail_result(result, "error during retrieving parameter '%s'" % (err.message))

    if not no_log:
        display.v("incoming_value %s" % (incoming_value))

    # credentials are optional; fall back to unauthenticated access
    # (narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are not swallowed)
    auth_params = {}
    try:
        user = self._get_arg_or_var("tasmota_user")
        password = self._get_arg_or_var('tasmota_password')
        auth_params = {'user': user, 'password': password}
        display.v("authentication parameters: %s" % (auth_params))
    except Exception:
        pass

    # Enable retries due to reboot of the devices
    session = requests.Session()
    session.mount("http://%s" % (tasmota_host), HTTPAdapter(Retry(total=5, backoff_factor=1.0)))

    endpoint_uri = "http://%s/cm" % (tasmota_host)
    status_params = copy.deepcopy(auth_params)
    status_params.update({'cmnd': command})

    # execute command
    # BUG FIX: use the session (with the mounted retry adapter) instead
    # of requests.get, which bypassed the retries entirely.
    status_response = session.get(url=endpoint_uri, params=status_params)

    # get response data
    data = status_response.json()
    display.v("data: %s, response code: %s" % (data, status_response.status_code))
    warnings = []
    resp_warn = data.get("WARNING")
    if resp_warn:
        # Prior to v8.2.3 authorization error has 200 ok status
        if status_response.status_code == 401 or resp_warn == "Need user=<username>&password=<password>":
            raise AnsibleAuthenticationFailure("Missing/Invalid credentials")
        warnings.append(resp_warn)
    if status_response.status_code != 200:
        raise AnsibleRuntimeError("Unexpected response code: %s" % (status_response.status_code))

    existing_value = unicode(data.get(command))
    if (command.startswith('Rule')):
        display.vv("rule found!")
        existing_once = data.get("Once")
        existing_rules = data.get("Rules")
        if incoming_value in ["0", "1", "2"]:
            display.vv("disable, enable, toggle rule found")
            existing_value = self._translateResultStr(existing_value)
        elif incoming_value in ["4", "5"]:
            display.vv("disable, enable oneshot")
            existing_value = self._translateResultStr(existing_once, "4", "5")
        elif incoming_value.startswith("on"):
            display.vv("rule value found")
            existing_value = existing_rules
    elif (command.startswith('SetOption')):
        existing_value = self._translateResultStr(existing_value)
    elif (command.startswith('PowerRetain')):
        existing_value = self._translateResultStr(existing_value)
    elif (command == 'Module'):
        modules_ids = data.get(command).keys()
        existing_value = next(iter(modules_ids))
    elif (command.startswith('Gpio')):
        gpios = data.get(command.upper()).keys()
        existing_value = next(iter(gpios))
    elif (command == 'Template'):
        existing_value = data
    elif (command == 'TimeStd' or command == 'TimeDst'):
        display.vv("TimeStd/TimeDst found!")
        existing_data = data.get(command)
        existing_day = existing_data.get("Day")
        existing_hemisphere = existing_data.get("Hemisphere")
        existing_hour = existing_data.get("Hour")
        existing_month = existing_data.get("Month")
        existing_offset = existing_data.get("Offset")
        existing_week = existing_data.get("Week")
        existing_value = "%s,%s,%s,%s,%s,%s" % (existing_hemisphere, existing_week, existing_month, existing_day, existing_hour, existing_offset)
    elif (command == 'TuyaMCU'):
        # Return only relevant subset of fn/dp ids, ignoring the rest
        try:
            fn_id, dp_id = (int(x) for x in incoming_value.split(','))
        except Exception as e:
            raise AnsibleOptionsError("Invalid value '%s' for TuyaMCU: %s" % (incoming_value, e))
        try:
            def our_entry(x):
                return fn_id == x['fnId'] or dp_id == x['dpId']
            relevant_entries = list(filter(our_entry, data['TuyaMCU']))
            relevant_entries = ["%s,%s" % (x['fnId'], x['dpId']) for x in relevant_entries]
        except KeyError as e:
            raise AnsibleRuntimeError("Invalid response: %s, error: %s" % (data, e))
        if dp_id != 0:
            if len(relevant_entries) == 1:
                existing_value = relevant_entries[0]
            else:
                existing_value = relevant_entries
        else:
            if not relevant_entries:
                # Missing entries equals to disabled entry
                existing_value = incoming_value
            else:
                existing_value = relevant_entries
    elif (command == 'DimmerRange'):
        try:
            existing_value = "%s,%s" % (data[command]['Min'], data[command]['Max'])
        except Exception as e:
            raise AnsibleRuntimeError("Invalid response payload: %s, error: %s" % (data, e))

    display.v("[%s] command: %s, existing_value: '%s', incoming_value: '%s'" % (tasmota_host, command, existing_value, incoming_value if not no_log else ""))
    display.v("[%s] existing_uri: %s" % (tasmota_host, endpoint_uri))

    if existing_value != incoming_value:
        changed = True
        if not check_mode:
            change_params = copy.deepcopy(auth_params)
            change_params.update({'cmnd': ("%s %s" % (command, incoming_value))})
            change_response = session.get(url=endpoint_uri, params=change_params)
            # BUG FIX: previously checked status_response (the read
            # request) instead of the change request's own response.
            if change_response.status_code != 200:
                raise AnsibleRuntimeError("Unexpected response code: %s" % (change_response.status_code))

    if warnings:
        display.warning(warnings)
        result["warning"] = warnings
    result["changed"] = changed
    result["command"] = command
    result["tasmota_host"] = tasmota_host
    result["raw_data"] = data
    result["endpoint_uri"] = endpoint_uri
    result["incoming_value"] = incoming_value
    result["existing_value"] = existing_value
    return result
def run(self):
    ''' use Runner lib to do SSH things '''
    super(PullCLI, self).run()

    # log command line
    now = datetime.datetime.now()
    display.display(now.strftime("Starting Ansible Pull at %F %T"))
    display.display(' '.join(sys.argv))

    # Build Checkout command
    # Now construct the ansible command
    node = platform.node()
    host = socket.getfqdn()
    # limit to the local machine under all the names it may be known by
    limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
    base_opts = '-c local '
    if self.options.verbosity > 0:
        base_opts += ' -%s' % ''.join(["v" for x in range(0, self.options.verbosity)])

    # Attempt to use the inventory passed in as an argument
    # It might not yet have been downloaded so use localhost as default
    if not self.options.inventory or (',' not in self.options.inventory and not os.path.exists(self.options.inventory)):
        inv_opts = 'localhost,'
    else:
        inv_opts = self.options.inventory

    # FIXME: enable more repo modules hg/svn?
    # NOTE(review): repo_opts is only assigned in the 'git' branch; any
    # other module_name would hit a NameError when building cmd below —
    # presumably SUPPORTED_REPO_MODULES guards this upstream; verify.
    if self.options.module_name == 'git':
        repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
        if self.options.checkout:
            repo_opts += ' version=%s' % self.options.checkout
        if self.options.accept_host_key:
            repo_opts += ' accept_hostkey=yes'
        if self.options.private_key_file:
            repo_opts += ' key_file=%s' % self.options.private_key_file
        if self.options.verify:
            repo_opts += ' verify_commit=yes'
        if self.options.clean:
            repo_opts += ' force=yes'
        if self.options.tracksubs:
            repo_opts += ' track_submodules=yes'
        if not self.options.fullclone:
            repo_opts += ' depth=1'

    path = module_loader.find_plugin(self.options.module_name)
    if path is None:
        raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))

    bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    # hardcode local and inventory/host as this is just meant to fetch the repo
    cmd = '%s/ansible -i "%s" %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts)
    for ev in self.options.extra_vars:
        cmd += ' -e "%s"' % ev

    # Nap?
    if self.options.sleep:
        display.display("Sleeping for %d seconds..." % self.options.sleep)
        time.sleep(self.options.sleep)

    # RUN the Checkout command
    display.debug("running ansible with VCS module to checkout repo")
    display.vvvv('EXEC: %s' % cmd)
    rc, out, err = run_cmd(cmd, live=True)

    if rc != 0:
        if self.options.force:
            display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
        else:
            return rc
    elif self.options.ifchanged and '"changed": true' not in out:
        display.display("Repository has not changed, quitting.")
        return 0

    playbook = self.select_playbook(self.options.dest)
    if playbook is None:
        raise AnsibleOptionsError("Could not find a playbook to run.")

    # Build playbook command
    cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
    if self.options.vault_password_file:
        cmd += " --vault-password-file=%s" % self.options.vault_password_file
    if self.options.inventory:
        cmd += ' -i "%s"' % self.options.inventory
    for ev in self.options.extra_vars:
        cmd += ' -e "%s"' % ev
    if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass:
        cmd += ' --ask-become-pass'
    if self.options.skip_tags:
        cmd += ' --skip-tags "%s"' % to_native(u','.join(self.options.skip_tags))
    if self.options.tags:
        cmd += ' -t "%s"' % to_native(u','.join(self.options.tags))
    if self.options.subset:
        cmd += ' -l "%s"' % self.options.subset
    else:
        cmd += ' -l "%s"' % limit_opts

    os.chdir(self.options.dest)

    # RUN THE PLAYBOOK COMMAND
    display.debug("running ansible-playbook to do actual work")
    display.debug('EXEC: %s' % cmd)
    rc, out, err = run_cmd(cmd, live=True)

    # optionally clean up the checkout after the run
    if self.options.purge:
        os.chdir('/')
        try:
            shutil.rmtree(self.options.dest)
        except Exception as e:
            display.error("Failed to remove %s: %s" % (self.options.dest, str(e)))

    return rc
def execute_init(self):
    """
    Executes the init action, which creates the skeleton framework
    of a role that complies with the galaxy metadata format.

    Creates the role directory, a default README.md and .travis.yml,
    then one subdirectory per GalaxyRole.ROLE_DIRS entry with an
    appropriate main.yml (or test fixtures for 'tests').

    :raises AnsibleOptionsError: when no role name was given
    :raises AnsibleError: when the target path already exists
    """
    init_path = self.get_opt('init_path', './')
    force = self.get_opt('force', False)
    offline = self.get_opt('offline', False)

    role_name = self.args.pop(0).strip() if self.args else None
    if not role_name:
        raise AnsibleOptionsError("- no role name specified for init")
    role_path = os.path.join(init_path, role_name)
    if os.path.exists(role_path):
        if os.path.isfile(role_path):
            raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
        elif not force:
            # BUG FIX: the first fragment was missing its newline, producing
            # "...already exists.you can use --force..." in the message.
            raise AnsibleError("- the directory %s already exists.\n"
                               "you can use --force to re-initialize this directory,\n"
                               "however it will reset any main.yml files that may have\n"
                               "been modified there already." % role_path)

    # create role directory and default README.md
    if not os.path.exists(role_path):
        os.makedirs(role_path)
    readme_path = os.path.join(role_path, "README.md")
    # context managers guarantee the files are closed even on write errors
    with open(readme_path, "wb") as f:
        f.write(self.galaxy.default_readme)

    # create default .travis.yml
    travis = Environment().from_string(self.galaxy.default_travis).render()
    with open(os.path.join(role_path, '.travis.yml'), 'w') as f:
        f.write(travis)

    # renamed loop variable from 'dir' to avoid shadowing the builtin
    for role_dir in GalaxyRole.ROLE_DIRS:
        dir_path = os.path.join(init_path, role_name, role_dir)
        main_yml_path = os.path.join(dir_path, 'main.yml')

        # create the directory if it doesn't exist already
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

        # now create the main.yml file for that directory
        if role_dir == "meta":
            # create a skeleton meta/main.yml with a valid galaxy_info
            # datastructure in place, plus with all of the available
            # platforms included (but commented out), the galaxy_tags
            # list, and the dependencies section
            platforms = []
            if not offline and self.api:
                platforms = self.api.get_list("platforms") or []

            # group the list of platforms from the api based
            # on their names, with the release field being
            # appended to a list of versions
            platform_groups = defaultdict(list)
            for platform in platforms:
                platform_groups[platform['name']].append(platform['release'])
                platform_groups[platform['name']].sort()

            inject = dict(
                author='your name',
                company='your company (optional)',
                license='license (GPLv2, CC-BY, etc)',
                issue_tracker_url='http://example.com/issue/tracker',
                min_ansible_version='1.2',
                platforms=platform_groups,
            )
            rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
            with open(main_yml_path, 'w') as f:
                f.write(rendered_meta)
        elif role_dir == "tests":
            # create tests/test.yml
            inject = dict(role_name=role_name)
            playbook = Environment().from_string(self.galaxy.default_test).render(inject)
            with open(os.path.join(dir_path, 'test.yml'), 'w') as f:
                f.write(playbook)

            # create tests/inventory
            with open(os.path.join(dir_path, 'inventory'), 'w') as f:
                f.write('localhost')
        elif role_dir not in ('files', 'templates'):
            # just write a (mostly) empty YAML file for main.yml
            with open(main_yml_path, 'w') as f:
                f.write('---\n# %s file for %s\n' % (role_dir, role_name))
    display.display("- %s was created successfully" % role_name)
def parse(self):
    ''' create an options parser for bin/ansible-pull and validate
    the resulting options (URL required, supported repo module,
    defaulted destination directory, randomized sleep). '''
    self.parser = CLI.base_parser(
        usage='%prog -U <repository> [options]',
        connect_opts=True,
        vault_opts=True,
        runtask_opts=True,
        subset_opts=True,
        inventory_opts=True,
        module_opts=True,
        runas_prompt_opts=True,
    )

    # options unique to pull
    self.parser.add_option('--purge', default=False, action='store_true',
                           help='purge checkout after playbook run')
    self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
                           help='only run the playbook if the repository has been updated')
    self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
                           help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests')
    self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
                           help='run the playbook even if the repository could not be updated')
    self.parser.add_option('-d', '--directory', dest='dest', default=None,
                           help='directory to checkout repository to')
    self.parser.add_option('-U', '--url', dest='url', default=None,
                           help='URL of the playbook repository')
    self.parser.add_option('--full', dest='fullclone', action='store_true',
                           help='Do a full clone, instead of a shallow one.')
    self.parser.add_option('-C', '--checkout', dest='checkout',
                           help='branch/tag/commit to checkout. '
                                'Defaults to behavior of repository module.')
    self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
                           help='adds the hostkey for the repo url if not already added')
    self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
                           help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE)
    self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true',
                           help='verify GPG signature of checked out commit, if it fails abort running the playbook.'
                                ' This needs the corresponding VCS module to support such an operation')
    self.parser.add_option('--clean', dest='clean', default=False, action='store_true',
                           help='modified files in the working repository will be discarded')
    self.parser.add_option('--track-subs', dest='tracksubs', default=False, action='store_true',
                           help='submodules will track the latest changes'
                                ' This is equivalent to specifying the --remote flag to git submodule update')

    # for pull we don't want a default
    self.parser.set_defaults(inventory=None)

    super(PullCLI, self).parse()

    if not self.options.dest:
        hostname = socket.getfqdn()
        # use a hostname dependent directory, in case of $HOME on nfs
        self.options.dest = os.path.join('~/.ansible/pull', hostname)
    self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest))

    if self.options.sleep:
        try:
            secs = random.randint(0, int(self.options.sleep))
            self.options.sleep = secs
        except ValueError:
            raise AnsibleOptionsError("%s is not a number." % self.options.sleep)

    if not self.options.url:
        raise AnsibleOptionsError("URL for repository not specified, use -h for help")

    if self.options.module_name not in self.SUPPORTED_REPO_MODULES:
        # BUG FIX: corrected "Unsuported" typo in the user-facing message
        raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))

    display.verbosity = self.options.verbosity
    self.validate_conflicts(vault_opts=True)
def run(self):
    """Entry point for ansible-doc: list plugins, dump metadata, or
    render documentation for the plugins named on the command line.

    Output is JSON when json_format is set, plain text (via the pager)
    otherwise.  Returns 0.
    """
    super(DocCLI, self).run()

    plugin_type = context.CLIARGS['type']
    do_json = context.CLIARGS['json_format']

    if plugin_type in C.DOCUMENTABLE_PLUGINS:
        # resolve e.g. 'module' -> plugin_loader.module_loader
        loader = getattr(plugin_loader, '%s_loader' % plugin_type)
    else:
        raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)

    # add to plugin paths from command line
    basedir = context.CLIARGS['basedir']
    if basedir:
        AnsibleCollectionConfig.playbook_paths = basedir
        loader.add_directory(basedir, with_subdir=True)
    if context.CLIARGS['module_path']:
        for path in context.CLIARGS['module_path']:
            if path:
                loader.add_directory(path)

    # save only top level paths for errors
    search_paths = DocCLI.print_paths(loader)
    loader._paths = None  # reset so we can use subdirs below

    # list plugins names or filepath for type, both options share most code
    if context.CLIARGS['list_files'] or context.CLIARGS['list_dir']:
        coll_filter = None
        if len(context.CLIARGS['args']) == 1:
            # a single positional arg acts as a collection filter
            coll_filter = context.CLIARGS['args'][0]

        if coll_filter in ('', None):
            paths = loader._get_paths()
            for path in paths:
                self.plugin_list.update(DocCLI.find_plugins(path, plugin_type))

        add_collection_plugins(self.plugin_list, plugin_type, coll_filter=coll_filter)

        # get appropriate content depending on option
        if context.CLIARGS['list_dir']:
            results = self._get_plugin_list_descriptions(loader)
        elif context.CLIARGS['list_files']:
            results = self._get_plugin_list_filenames(loader)

        if do_json:
            jdump(results)
        elif self.plugin_list:
            self.display_plugin_list(results)
        else:
            display.warning("No plugins found.")
    # dump plugin desc/data as JSON
    elif context.CLIARGS['dump']:
        plugin_data = {}
        plugin_names = DocCLI.get_all_plugins_of_type(plugin_type)
        for plugin_name in plugin_names:
            plugin_info = DocCLI.get_plugin_metadata(plugin_type, plugin_name)
            if plugin_info is not None:
                plugin_data[plugin_name] = plugin_info

        jdump(plugin_data)
    else:
        # display specific plugin docs
        if len(context.CLIARGS['args']) == 0:
            raise AnsibleOptionsError("Incorrect options passed")

        # get the docs for plugins in the command line list
        plugin_docs = {}
        for plugin in context.CLIARGS['args']:
            try:
                doc, plainexamples, returndocs, metadata = DocCLI._get_plugin_doc(plugin, plugin_type, loader, search_paths)
            except PluginNotFound:
                display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths))
                continue
            except Exception as e:
                display.vvv(traceback.format_exc())
                raise AnsibleError("%s %s missing documentation (or could not parse"
                                   " documentation): %s\n" % (plugin_type, plugin, to_native(e)))

            if not doc:
                # The doc section existed but was empty
                continue

            plugin_docs[plugin] = {'doc': doc, 'examples': plainexamples, 'return': returndocs, 'metadata': metadata}

        if do_json:
            # Some changes to how json docs are formatted
            for plugin, doc_data in plugin_docs.items():
                try:
                    # 'return' arrives as YAML text; parse it for JSON output
                    doc_data['return'] = yaml.safe_load(doc_data['return'])
                except Exception:
                    pass

            jdump(plugin_docs)
        else:
            # Some changes to how plain text docs are formatted
            text = []
            for plugin, doc_data in plugin_docs.items():
                textret = DocCLI.format_plugin_doc(plugin, plugin_type, doc_data['doc'], doc_data['examples'], doc_data['return'], doc_data['metadata'])
                if textret:
                    text.append(textret)
                else:
                    display.warning("No valid documentation was retrieved from '%s'" % plugin)

            if text:
                DocCLI.pager(''.join(text))

    return 0
def parse(self, inventory, loader, path, cache=True):
    """Populate ansible inventory from a Controller (Tower/AWX) API.

    Fetches the inventory script output for the configured
    inventory_id and mirrors its groups, hosts, relationships and
    variables into self.inventory.

    :param inventory: inventory object (NOTE: this parameter is later
        shadowed by the JSON payload fetched from the API)
    :param loader: the DataLoader
    :param path: path to the plugin config file, read when supplied
    :param cache: accepted for interface compatibility; not used here
    """
    super().parse(inventory, loader, path)
    if not self.no_config_file_supplied and os.path.isfile(path):
        self._read_config_data(path)

    # Defer processing of params to logic shared with the modules
    module_params = {}
    for plugin_param, module_param in ControllerAPIModule.short_params.items():
        opt_val = self.get_option(plugin_param)
        if opt_val is not None:
            module_params[module_param] = opt_val

    module = ControllerAPIModule(argument_spec={}, direct_params=module_params, error_callback=handle_error, warn_callback=self.warn_callback)

    # validate type of inventory_id because we allow two types as special case
    inventory_id = self.get_option('inventory_id')
    if isinstance(inventory_id, int):
        inventory_id = to_text(inventory_id, nonstring='simplerepr')
    else:
        try:
            inventory_id = ensure_type(inventory_id, 'str')
        except ValueError as e:
            raise_from(
                AnsibleOptionsError('Invalid type for configuration option inventory_id, '
                                    'not integer, and cannot convert to string: {err}'.format(err=to_native(e))), e)
    # strip path separators so the id cannot alter the URL structure
    inventory_id = inventory_id.replace('/', '')
    inventory_url = '/api/v2/inventories/{inv_id}/script/'.format(inv_id=inventory_id)

    # NOTE: from here on, 'inventory' is the JSON dict from the API,
    # no longer the inventory object passed in as a parameter.
    inventory = module.get_endpoint(inventory_url, data={'hostvars': '1', 'towervars': '1', 'all': '1'})['json']

    # To start with, create all the groups.
    for group_name in inventory:
        if group_name != '_meta':
            self.inventory.add_group(group_name)

    # Then, create all hosts and add the host vars.
    all_hosts = inventory['_meta']['hostvars']
    for host_name, host_vars in six.iteritems(all_hosts):
        self.inventory.add_host(host_name)
        for var_name, var_value in six.iteritems(host_vars):
            self.inventory.set_variable(host_name, var_name, var_value)

    # Lastly, create to group-host and group-group relationships, and set group vars.
    for group_name, group_content in six.iteritems(inventory):
        if group_name != 'all' and group_name != '_meta':
            # First add hosts to groups
            for host_name in group_content.get('hosts', []):
                self.inventory.add_host(host_name, group_name)

            # Then add the parent-children group relationships.
            for child_group_name in group_content.get('children', []):
                # add the child group to groups, if its already there it will just throw a warning
                self.inventory.add_group(child_group_name)
                self.inventory.add_child(group_name, child_group_name)

        # Set the group vars. Note we should set group var for 'all', but not '_meta'.
        if group_name != '_meta':
            for var_name, var_value in six.iteritems(group_content.get('vars', {})):
                self.inventory.set_variable(group_name, var_name, var_value)

    # Fetch extra variables if told to do so
    if self.get_option('include_metadata'):
        config_data = module.get_endpoint('/api/v2/config/')['json']
        server_data = {}
        server_data['license_type'] = config_data.get('license_info', {}).get('license_type', 'unknown')
        for key in ('version', 'ansible_version'):
            server_data[key] = config_data.get(key, 'unknown')
        # exposed under both names for tower/controller compatibility
        self.inventory.set_variable('all', 'tower_metadata', server_data)
        self.inventory.set_variable('all', 'controller_metadata', server_data)

    # Clean up the inventory.
    self.inventory.reconcile_inventory()
def execute_install(self):
    """
    uses the args list of roles to be installed, unless -f was specified. The list of roles
    can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.

    Roles may also come from a requirements file (--role-file); each
    installed role's dependencies are queued for installation unless
    --no-deps was given.  Returns 0.
    """
    role_file = self.options.role_file

    if len(self.args) == 0 and role_file is None:
        # the user needs to specify one of either --role-file or specify a single user/role name
        raise AnsibleOptionsError("- you must specify a user/role name or a roles file")

    no_deps = self.options.no_deps
    force = self.options.force

    roles_left = []
    if role_file:
        try:
            f = open(role_file, 'r')
            if role_file.endswith('.yaml') or role_file.endswith('.yml'):
                try:
                    required_roles = yaml.safe_load(f.read())
                except Exception as e:
                    # NOTE(review): 'e' is unused here; the original parse
                    # error is not included in the message or chained.
                    raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)

                if required_roles is None:
                    raise AnsibleError("No roles found in file: %s" % role_file)

                for role in required_roles:
                    if "include" not in role:
                        role = RoleRequirement.role_yaml_parse(role)
                        display.vvv("found role %s in yaml file" % str(role))
                        if "name" not in role and "scm" not in role:
                            raise AnsibleError("Must specify name or src for role")
                        roles_left.append(GalaxyRole(self.galaxy, **role))
                    else:
                        # an 'include' entry pulls roles from another requirements file
                        with open(role["include"]) as f_include:
                            try:
                                roles_left += [GalaxyRole(self.galaxy, **r) for r in (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
                            except Exception as e:
                                msg = "Unable to load data from the include requirements file: %s %s"
                                raise AnsibleError(msg % (role_file, e))
            else:
                raise AnsibleError("Invalid role requirements file")
            f.close()
        except (IOError, OSError) as e:
            raise AnsibleError('Unable to open %s: %s' % (role_file, to_native(e)))
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        for rname in self.args:
            role = RoleRequirement.role_yaml_parse(rname.strip())
            roles_left.append(GalaxyRole(self.galaxy, **role))

    # NOTE: roles_left may grow while iterating (dependencies are appended)
    for role in roles_left:
        # only process roles in roles files when names matches if given
        if role_file and self.args and role.name not in self.args:
            display.vvv('Skipping role %s' % role.name)
            continue

        display.vvv('Processing role %s ' % role.name)

        # query the galaxy API for the role data
        if role.install_info is not None:
            if role.install_info['version'] != role.version or force:
                if force:
                    display.display('- changing role %s from %s to %s' % (role.name, role.install_info['version'], role.version or "unspecified"))
                    role.remove()
                else:
                    display.warning('- %s (%s) is already installed - use --force to change version to %s' % (role.name, role.install_info['version'], role.version or "unspecified"))
                    continue
            else:
                if not force:
                    display.display('- %s is already installed, skipping.' % str(role))
                    continue

        try:
            installed = role.install()
        except AnsibleError as e:
            display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
            self.exit_without_ignore()
            continue

        # install dependencies, if we want them
        if not no_deps and installed:
            if not role.metadata:
                display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
            else:
                role_dependencies = role.metadata.get('dependencies') or []
                for dep in role_dependencies:
                    display.debug('Installing dep %s' % dep)
                    dep_req = RoleRequirement()
                    dep_info = dep_req.role_yaml_parse(dep)
                    dep_role = GalaxyRole(self.galaxy, **dep_info)
                    if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
                        # we know we can skip this, as it's not going to
                        # be found on galaxy.ansible.com
                        continue
                    if dep_role.install_info is None:
                        if dep_role not in roles_left:
                            display.display('- adding dependency: %s' % str(dep_role))
                            roles_left.append(dep_role)
                        else:
                            display.display('- dependency %s already pending installation.' % dep_role.name)
                    else:
                        if dep_role.install_info['version'] != dep_role.version:
                            display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' % (str(dep_role), role.name, dep_role.install_info['version']))
                        else:
                            display.display('- dependency %s is already installed, skipping.' % dep_role.name)

        if not installed:
            display.warning("- %s was NOT installed successfully." % role.name)
            self.exit_without_ignore()

    return 0
def run(self):
    ''' use Runner lib to do SSH things '''
    super(AdHocCLI, self).run()

    # only thing left should be host pattern
    # NOTE(review): raises IndexError if no pattern was given —
    # presumably argument parsing guarantees one; verify.
    pattern = self.args[0]

    # ignore connection password cause we are local
    if self.options.connection == "local":
        self.options.ask_pass = False

    sshpass = None
    becomepass = None
    vault_pass = None

    self.normalize_become_options()
    (sshpass, becomepass) = self.ask_passwords()
    passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

    if self.options.vault_password_file:
        # read vault_pass from a file
        vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
    elif self.options.ask_vault_pass:
        vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]

    # set up the core objects needed to run a play
    loader = DataLoader(vault_password=vault_pass)
    variable_manager = VariableManager()
    variable_manager.extra_vars = load_extra_vars(loader=loader, options=self.options)

    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
    variable_manager.set_inventory(inventory)

    hosts = inventory.list_hosts(pattern)
    if len(hosts) == 0:
        self.display.warning("provided hosts list is empty, only localhost is available")

    if self.options.listhosts:
        for host in hosts:
            self.display.display(' %s' % host)
        return 0

    if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
        raise AnsibleOptionsError("No argument passed to %s module" % self.options.module_name)

    # TODO: implement async support
    # if self.options.seconds:
    #     callbacks.display("background launch...\n\n", color='cyan')
    #     results, poller = runner.run_async(self.options.seconds)
    #     results = self.poll_while_needed(poller)
    # else:
    #     results = runner.run()

    # create a pseudo-play to execute the specified module via a single task
    play_ds = self._play_ds(pattern)
    play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)

    # choose stdout callback: single-line vs minimal formatting
    if self.options.one_line:
        cb = 'oneline'
    else:
        cb = 'minimal'

    # now create a task queue manager to execute the play
    self._tqm = None
    try:
        self._tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            display=self.display,
            options=self.options,
            passwords=passwords,
            stdout_callback=cb,
        )
        result = self._tqm.run(play)
    finally:
        # always clean up the TQM (worker processes, callbacks) even on error
        if self._tqm:
            self._tqm.cleanup()

    return result