def run(self, terms, **kwargs):
    if not isinstance(terms, Sequence):
        raise AssibleError("testns.testcol.noop expects a list")
    return terms
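
# Hedged usage sketch (not part of the plugin above; LookupModule is the class
# this run() method is assumed to live on). The lookup simply echoes its terms,
# so its contract could be exercised roughly like this — in a real play the
# same behaviour is reached via "{{ lookup('testns.testcol.noop', ['one', 'two']) }}":
#
#   assert LookupModule().run(['one', 'two']) == ['one', 'two']   # list in, same list out
#   LookupModule().run(42)   # not a Sequence -> raises AssibleError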
def run(self):

    super(PlaybookCLI, self).run()

    # Note: slightly wrong, this is written so that implicit localhost
    # manages passwords
    sshpass = None
    becomepass = None
    passwords = {}

    # initial error check, to make sure all specified playbooks are accessible
    # before we start running anything through the playbook executor
    b_playbook_dirs = []
    for playbook in context.CLIARGS['args']:
        if not os.path.exists(playbook):
            raise AssibleError("the playbook: %s could not be found" % playbook)
        if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
            raise AssibleError("the playbook: %s does not appear to be a file" % playbook)

        b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
        # load plugins from all playbooks in case they add callbacks/inventory/etc
        add_all_plugin_dirs(b_playbook_dir)
        b_playbook_dirs.append(b_playbook_dir)

    AssibleCollectionConfig.playbook_paths = b_playbook_dirs

    playbook_collection = _get_collection_name_from_path(b_playbook_dirs[0])

    if playbook_collection:
        display.warning("running playbook inside collection {0}".format(playbook_collection))
        AssibleCollectionConfig.default_collection = playbook_collection

    # don't deal with privilege escalation or passwords when we don't need to
    if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
            context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
        (sshpass, becomepass) = self.ask_passwords()
        passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

    # create base objects
    loader, inventory, variable_manager = self._play_prereqs()

    # (which is not returned in list_hosts()) is taken into account for
    # warning if inventory is empty. But it can't be taken into account for
    # checking if limit doesn't match any hosts. Instead we don't worry about
    # limit if only implicit localhost was in inventory to start with.
    #
    # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
    CLI.get_host_list(inventory, context.CLIARGS['subset'])

    # flush fact cache if requested
    if context.CLIARGS['flush_cache']:
        self._flush_cache(inventory, variable_manager)

    # create the playbook executor, which manages running the plays via a task queue manager
    pbex = PlaybookExecutor(playbooks=context.CLIARGS['args'], inventory=inventory,
                            variable_manager=variable_manager, loader=loader,
                            passwords=passwords)

    results = pbex.run()

    if isinstance(results, list):
        for p in results:

            display.display('\nplaybook: %s' % p['playbook'])
            for idx, play in enumerate(p['plays']):
                if play._included_path is not None:
                    loader.set_basedir(play._included_path)
                else:
                    pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
                    loader.set_basedir(pb_dir)

                msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
                mytags = set(play.tags)
                msg += '\tTAGS: [%s]' % (','.join(mytags))

                if context.CLIARGS['listhosts']:
                    playhosts = set(inventory.get_hosts(play.hosts))
                    msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
                    for host in playhosts:
                        msg += "\n %s" % host

                display.display(msg)

                all_tags = set()
                if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
                    taskmsg = ''
                    if context.CLIARGS['listtasks']:
                        taskmsg = ' tasks:\n'

                    def _process_block(b):
                        taskmsg = ''
                        for task in b.block:
                            if isinstance(task, Block):
                                taskmsg += _process_block(task)
                            else:
                                if task.action == 'meta' and task.implicit:
                                    continue

                                all_tags.update(task.tags)
                                if context.CLIARGS['listtasks']:
                                    cur_tags = list(mytags.union(set(task.tags)))
                                    cur_tags.sort()
                                    if task.name:
                                        taskmsg += " %s" % task.get_name()
                                    else:
                                        taskmsg += " %s" % task.action
                                    taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)

                        return taskmsg

                    all_vars = variable_manager.get_vars(play=play)
                    for block in play.compile():
                        block = block.filter_tagged_tasks(all_vars)
                        if not block.has_tasks():
                            continue
                        taskmsg += _process_block(block)

                    if context.CLIARGS['listtags']:
                        cur_tags = list(mytags.union(all_tags))
                        cur_tags.sort()
                        taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)

                    display.display(taskmsg)

        return 0
    else:
        return results
def parse_source(self, source, cache=False):
    ''' Generate or update inventory for the source provided '''

    parsed = False
    display.debug(u'Examining possible inventory source: %s' % source)

    # use binary for path functions
    b_source = to_bytes(source)

    # process directories as a collection of inventories
    if os.path.isdir(b_source):
        display.debug(u'Searching for inventory files in directory: %s' % source)
        for i in sorted(os.listdir(b_source)):

            display.debug(u'Considering %s' % i)
            # Skip hidden files and stuff we explicitly ignore
            if IGNORED.search(i):
                continue

            # recursively deal with directory entries
            fullpath = to_text(os.path.join(b_source, i), errors='surrogate_or_strict')
            parsed_this_one = self.parse_source(fullpath, cache=cache)
            display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
            if not parsed:
                parsed = parsed_this_one
    else:
        # left with strings or files, let plugins figure it out

        # set so new hosts can use for inventory_file/dir vars
        self._inventory.current_source = source

        # try source with each plugin
        failures = []
        for plugin in self._fetch_inventory_plugins():

            plugin_name = to_text(getattr(plugin, '_load_name', getattr(plugin, '_original_path', '')))
            display.debug(u'Attempting to use plugin %s (%s)' % (plugin_name, plugin._original_path))

            # initialize and figure out if plugin wants to attempt parsing this file
            try:
                plugin_wants = bool(plugin.verify_file(source))
            except Exception:
                plugin_wants = False

            if plugin_wants:
                try:
                    # FIXME in case plugin fails 1/2 way we have partial inventory
                    plugin.parse(self._inventory, self._loader, source, cache=cache)
                    try:
                        plugin.update_cache_if_changed()
                    except AttributeError:
                        # some plugins might not implement caching
                        pass
                    parsed = True
                    display.vvv('Parsed %s inventory source with %s plugin' % (source, plugin_name))
                    break
                except AssibleParserError as e:
                    display.debug('%s was not parsable by %s' % (source, plugin_name))
                    tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
                    failures.append({'src': source, 'plugin': plugin_name, 'exc': e, 'tb': tb})
                except Exception as e:
                    display.debug('%s failed while attempting to parse %s' % (plugin_name, source))
                    tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
                    failures.append({'src': source, 'plugin': plugin_name, 'exc': AssibleError(e), 'tb': tb})
            else:
                display.vvv("%s declined parsing %s as it did not pass its verify_file() method" % (plugin_name, source))
        else:
            if not parsed and failures:
                # only if no plugin processed files should we show errors.
                for fail in failures:
                    display.warning(u'\n* Failed to parse %s with %s plugin: %s' % (to_text(fail['src']), fail['plugin'], to_text(fail['exc'])))
                    if 'tb' in fail:
                        display.vvv(to_text(fail['tb']))
                if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
                    raise AssibleError(u'Completely failed to parse inventory source %s' % (source))

    if not parsed:
        if source != '/etc/assible/hosts' or os.path.exists(source):
            # only warn if NOT using the default and if using it, only if the file is present
            display.warning("Unable to parse %s as an inventory source" % source)

    # clear up, jic
    self._inventory.current_source = None

    return parsed
def run(self, terms, variables=None, url=None, queue=None, count=None):
    if not HAS_PIKA:
        raise AssibleError('pika python package is required for rabbitmq lookup.')
    if not url:
        raise AssibleError('URL is required for rabbitmq lookup.')
    if not queue:
        raise AssibleError('Queue is required for rabbitmq lookup.')

    display.vvv(u"terms:%s : variables:%s url:%s queue:%s count:%s" % (terms, variables, url, queue, count))

    try:
        parameters = pika.URLParameters(url)
    except Exception as e:
        raise AssibleError("URL malformed: %s" % to_native(e))

    try:
        connection = pika.BlockingConnection(parameters)
    except Exception as e:
        raise AssibleError("Connection issue: %s" % to_native(e))

    try:
        conn_channel = connection.channel()
    except pika.exceptions.AMQPChannelError as e:
        try:
            connection.close()
        except pika.exceptions.AMQPConnectionError as ie:
            raise AssibleError("Channel and connection closing issues: %s / %s" % (to_native(e), to_native(ie)))
        raise AssibleError("Channel issue: %s" % to_native(e))

    ret = []
    idx = 0

    while True:
        method_frame, properties, body = conn_channel.basic_get(queue=queue)
        if method_frame:
            display.vvv(u"%s, %s, %s " % (method_frame, properties, to_text(body)))

            # TODO: In the future consider checking content_type and handle text/binary data differently.
            msg_details = dict({
                'msg': to_text(body),
                'message_count': method_frame.message_count,
                'routing_key': method_frame.routing_key,
                'delivery_tag': method_frame.delivery_tag,
                'redelivered': method_frame.redelivered,
                'exchange': method_frame.exchange,
                'delivery_mode': properties.delivery_mode,
                'content_type': properties.content_type,
                'headers': properties.headers
            })
            if properties.content_type == 'application/json':
                try:
                    msg_details['json'] = json.loads(msg_details['msg'])
                except ValueError as e:
                    raise AssibleError("Unable to decode JSON for message %s: %s" % (method_frame.delivery_tag, to_native(e)))

            ret.append(msg_details)
            conn_channel.basic_ack(method_frame.delivery_tag)
            idx += 1
            if method_frame.message_count == 0 or idx == count:
                break
        # If we didn't get a method_frame, exit.
        else:
            break

    if connection.is_closed:
        return [ret]
    else:
        try:
            connection.close()
        except pika.exceptions.AMQPConnectionError:
            pass
        return [ret]
def _populate(self):
    raw_params = dict(
        docker_host=self.get_option('docker_host'),
        tls=self.get_option('tls'),
        tls_verify=self.get_option('validate_certs'),
        key_path=self.get_option('client_key'),
        cacert_path=self.get_option('ca_cert'),
        cert_path=self.get_option('client_cert'),
        tls_hostname=self.get_option('tls_hostname'),
        api_version=self.get_option('api_version'),
        timeout=self.get_option('timeout'),
        ssl_version=self.get_option('ssl_version'),
        debug=None,
    )
    update_tls_hostname(raw_params)
    connect_params = get_connect_params(raw_params, fail_function=self._fail)
    self.client = docker.DockerClient(**connect_params)
    self.inventory.add_group('all')
    self.inventory.add_group('manager')
    self.inventory.add_group('worker')
    self.inventory.add_group('leader')
    self.inventory.add_group('nonleaders')

    if self.get_option('include_host_uri'):
        if self.get_option('include_host_uri_port'):
            host_uri_port = str(self.get_option('include_host_uri_port'))
        elif self.get_option('tls') or self.get_option('validate_certs'):
            host_uri_port = '2376'
        else:
            host_uri_port = '2375'

    try:
        self.nodes = self.client.nodes.list()
        for self.node in self.nodes:
            self.node_attrs = self.client.nodes.get(self.node.id).attrs
            self.inventory.add_host(self.node_attrs['ID'])
            self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
            self.inventory.set_variable(self.node_attrs['ID'], 'assible_host', self.node_attrs['Status']['Addr'])
            if self.get_option('include_host_uri'):
                self.inventory.set_variable(self.node_attrs['ID'], 'assible_host_uri',
                                            'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
            if self.get_option('verbose_output'):
                self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
            if 'ManagerStatus' in self.node_attrs:
                if self.node_attrs['ManagerStatus'].get('Leader'):
                    # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0
                    # Check moby/moby#35437 for details
                    swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
                        self.node_attrs['Status']['Addr']
                    if self.get_option('include_host_uri'):
                        self.inventory.set_variable(self.node_attrs['ID'], 'assible_host_uri',
                                                    'tcp://' + swarm_leader_ip + ':' + host_uri_port)
                    self.inventory.set_variable(self.node_attrs['ID'], 'assible_host', swarm_leader_ip)
                    self.inventory.add_host(self.node_attrs['ID'], group='leader')
                else:
                    self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
            else:
                self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
            # Use constructed if applicable
            strict = self.get_option('strict')
            # Composed variables
            self._set_composite_vars(self.get_option('compose'), self.node_attrs, self.node_attrs['ID'], strict=strict)
            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, self.node_attrs['ID'], strict=strict)
            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, self.node_attrs['ID'], strict=strict)
    except Exception as e:
        raise AssibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
                           to_native(e))
def _fail(self, msg):
    raise AssibleError(msg)
def run(self, play):
    '''
    Iterates over the roles/tasks in a play, using the given (or default)
    strategy for queueing tasks. The default is the linear strategy, which
    operates like classic Assible by keeping all hosts in lock-step with a given
    task (meaning no hosts move on to the next task until all hosts are done
    with the current task).
    '''

    if not self._callbacks_loaded:
        self.load_callbacks()

    all_vars = self._variable_manager.get_vars(play=play)
    templar = Templar(loader=self._loader, variables=all_vars)
    warn_if_reserved(all_vars, templar.environment.globals.keys())

    new_play = play.copy()
    new_play.post_validate(templar)
    new_play.handlers = new_play.compile_roles_handlers() + new_play.handlers

    self.hostvars = HostVars(
        inventory=self._inventory,
        variable_manager=self._variable_manager,
        loader=self._loader,
    )

    play_context = PlayContext(new_play, self.passwords, self._connection_lockfile.fileno())
    if (self._stdout_callback and
            hasattr(self._stdout_callback, 'set_play_context')):
        self._stdout_callback.set_play_context(play_context)

    for callback_plugin in self._callback_plugins:
        if hasattr(callback_plugin, 'set_play_context'):
            callback_plugin.set_play_context(play_context)

    self.send_callback('v2_playbook_on_play_start', new_play)

    # build the iterator
    iterator = PlayIterator(
        inventory=self._inventory,
        play=new_play,
        play_context=play_context,
        variable_manager=self._variable_manager,
        all_vars=all_vars,
        start_at_done=self._start_at_done,
    )

    # adjust the number of workers to the configured forks or the batch size, whichever is lower
    self._initialize_processes(min(self._forks, iterator.batch_size))

    # load the specified strategy (or the default linear one)
    strategy = strategy_loader.get(new_play.strategy, self)
    if strategy is None:
        raise AssibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)

    # Because the TQM may survive multiple play runs, we start by marking
    # any hosts as failed in the iterator here which may have been marked
    # as failed in previous runs. Then we clear the internal list of failed
    # hosts so we know what failed this round.
    for host_name in self._failed_hosts.keys():
        host = self._inventory.get_host(host_name)
        iterator.mark_host_failed(host)

    self.clear_failed_hosts()

    # during initialization, the PlayContext will clear the start_at_task
    # field to signal that a matching task was found, so check that here
    # and remember it so we don't try to skip tasks on future plays
    if context.CLIARGS.get('start_at_task') is not None and play_context.start_at_task is None:
        self._start_at_done = True

    # and run the play using the strategy and cleanup on way out
    try:
        play_return = strategy.run(iterator, play_context)
    finally:
        strategy.cleanup()
        self._cleanup_processes()

    # now re-save the hosts that failed from the iterator to our internal list
    for host_name in iterator.get_failed_hosts():
        self._failed_hosts[host_name] = True

    return play_return
def load_callbacks(self):
    '''
    Loads all available callbacks, with the exception of those which
    utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
    only one such callback plugin will be loaded.
    '''

    if self._callbacks_loaded:
        return

    stdout_callback_loaded = False
    if self._stdout_callback is None:
        self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK

    if isinstance(self._stdout_callback, CallbackBase):
        stdout_callback_loaded = True
    elif isinstance(self._stdout_callback, string_types):
        if self._stdout_callback not in callback_loader:
            raise AssibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
        else:
            self._stdout_callback = callback_loader.get(self._stdout_callback)
            self._stdout_callback.set_options()
            stdout_callback_loaded = True
    else:
        raise AssibleError("callback must be an instance of CallbackBase or the name of a callback plugin")

    # get all configured loadable callbacks (adjacent, builtin)
    callback_list = list(callback_loader.all(class_only=True))

    # add whitelisted callbacks that refer to collections, which might not appear in normal listing
    for c in C.DEFAULT_CALLBACK_WHITELIST:
        # load all, as collection ones might be using short/redirected names and not a fqcn
        plugin = callback_loader.get(c, class_only=True)

        # TODO: check if this skip is redundant, loader should handle bad file/plugin cases already
        if plugin:
            # avoids incorrect and dupes possible due to collections
            if plugin not in callback_list:
                callback_list.append(plugin)
        else:
            display.warning("Skipping callback plugin '%s', unable to load" % c)

    # for each callback in the list see if we should add it to 'active callbacks' used in the play
    for callback_plugin in callback_list:

        callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', '')
        callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)

        # try to get collection world name first
        cnames = getattr(callback_plugin, '_redirected_names', [])
        if cnames:
            # store the name the plugin was loaded as, as that's what we'll need to compare to the configured callback list later
            callback_name = cnames[0]
        else:
            # fallback to 'old loader name'
            (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))

        display.vvvvv("Attempting to use '%s' callback." % (callback_name))
        if callback_type == 'stdout':
            # we only allow one callback of type 'stdout' to be loaded
            if callback_name != self._stdout_callback or stdout_callback_loaded:
                display.vv("Skipping callback '%s', as we already have a stdout callback." % (callback_name))
                continue
            stdout_callback_loaded = True
        elif callback_name == 'tree' and self._run_tree:
            # TODO: remove special case for tree, which is an adhoc cli option --tree
            pass
        elif not self._run_additional_callbacks or (callback_needs_whitelist and (
                # only run if not adhoc, or adhoc was specifically configured to run + check enabled list
                C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
            # 2.x plugins shipped with assible should require whitelisting, older or non shipped should load automatically
            continue

        try:
            callback_obj = callback_plugin()
            # avoid bad plugin not returning an object, only needed cause we do class_only load and bypass loader checks,
            # really a bug in the plugin itself which we ignore as callback errors are not supposed to be fatal.
            if callback_obj:
                # skip initializing if we already did the work for the same plugin (even with diff names)
                if callback_obj not in self._callback_plugins:
                    callback_obj.set_options()
                    self._callback_plugins.append(callback_obj)
                else:
                    display.vv("Skipping callback '%s', already loaded as '%s'." % (callback_plugin, callback_name))
            else:
                display.warning("Skipping callback '%s', as it does not create a valid plugin instance." % callback_name)
                continue
        except Exception as e:
            display.warning("Skipping callback '%s', unable to load due to: %s" % (callback_name, to_native(e)))
            continue

    self._callbacks_loaded = True
def run(self, tmp=None, task_vars=None):
    """ Load yml files recursively from a directory. """
    del tmp  # tmp no longer has any effect

    if task_vars is None:
        task_vars = dict()

    self.show_content = True
    self.included_files = []

    # Validate arguments
    dirs = 0
    files = 0
    for arg in self._task.args:
        if arg in self.VALID_DIR_ARGUMENTS:
            dirs += 1
        elif arg in self.VALID_FILE_ARGUMENTS:
            files += 1
        elif arg in self.VALID_ALL:
            pass
        else:
            raise AssibleError('{0} is not a valid option in include_vars'.format(to_native(arg)))

    if dirs and files:
        raise AssibleError("You are mixing file only and dir only arguments, these are incompatible")

    # set internal vars from args
    self._set_args()

    results = dict()
    if self.source_dir:
        self._set_dir_defaults()
        self._set_root_dir()
        if not path.exists(self.source_dir):
            failed = True
            err_msg = ('{0} directory does not exist'.format(to_native(self.source_dir)))
        elif not path.isdir(self.source_dir):
            failed = True
            err_msg = ('{0} is not a directory'.format(to_native(self.source_dir)))
        else:
            for root_dir, filenames in self._traverse_dir_depth():
                failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
                if failed:
                    break
                results.update(updated_results)
    else:
        try:
            self.source_file = self._find_needle('vars', self.source_file)
            failed, err_msg, updated_results = (self._load_files(self.source_file))
            if not failed:
                results.update(updated_results)
        except AssibleError as e:
            failed = True
            err_msg = to_native(e)

    if self.return_results_as_name:
        scope = dict()
        scope[self.return_results_as_name] = results
        results = scope

    result = super(ActionModule, self).run(task_vars=task_vars)

    if failed:
        result['failed'] = failed
        result['message'] = err_msg

    result['assible_included_var_files'] = self.included_files
    result['assible_facts'] = results
    result['_assible_no_log'] = not self.show_content

    return result