def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
    """Initialise the playbook executor and, unless we are only listing
    hosts/tasks/tags or syntax-checking, build the TaskQueueManager that
    will actually run the plays.

    :param playbooks: list of playbook file paths to execute
    :param inventory: inventory object used for host/group resolution
    :param variable_manager: resolves variables for plays and tasks
    :param loader: data loader used to read playbook files from disk
    :param options: parsed CLI options namespace
    :param passwords: dict of connection/become passwords
    """
    self._playbooks = playbooks
    self._inventory = inventory
    self._variable_manager = variable_manager
    self._loader = loader
    self._options = options
    self.passwords = passwords
    self._unreachable_hosts = dict()

    listing_only = (options.listhosts or options.listtasks or
                    options.listtags or options.syntax)
    if listing_only:
        # Listing/syntax modes never execute tasks, so no queue manager.
        self._tqm = None
    else:
        self._tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=self.passwords,
        )

    # Note: We run this here to cache whether the default ansible ssh
    # executable supports control persist. We can't do this caching at the
    # point where it is used (in task_executor) because that is post-fork
    # and therefore would be discarded after every task.
    check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords, callback):
    """Initialise the executor, replacing the stock TaskQueueManager with a
    MyTaskQueueManager that routes stdout through *callback*.

    :param callback: stdout callback plugin instance for result handling;
        the remaining parameters mirror the parent executor's __init__.
    """
    super(MyPlaybookExecutor, self).__init__(playbooks, inventory, variable_manager, loader, options, passwords)
    if options.listhosts or options.listtasks or options.listtags or options.syntax:
        self._tqm = None
    else:
        # BUG FIX: the parent __init__ already constructed a stock
        # TaskQueueManager for this branch; release it before replacing it
        # so its result queue is not leaked.
        if self._tqm is not None:
            self._tqm.cleanup()
        # MyTaskQueueManager runs a custom task queue, filtering out the
        # framework's own built-in callbacks.
        self._tqm = MyTaskQueueManager(inventory=inventory,
                                       variable_manager=variable_manager,
                                       loader=loader,
                                       options=options,
                                       passwords=self.passwords,
                                       stdout_callback=callback)
    # Cache ControlPersist support for the default ssh executable pre-fork.
    check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
class OPSPlaybookExecutor(PlaybookExecutor):
    """PlaybookExecutor variant that forwards a caller-supplied stdout
    callback to the TaskQueueManager (rewrite of PlaybookExecutor)."""

    def __init__(self, playbooks, inventory, variable_manager, loader,
                 options, passwords, stdout_callback=None):
        """Store executor state and build the TaskQueueManager unless this
        is a listing/syntax-only invocation.

        :param stdout_callback: optional callback plugin passed through to
            the TaskQueueManager; other parameters mirror PlaybookExecutor.
        """
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self.passwords = passwords
        self._unreachable_hosts = dict()

        wants_listing = (options.listhosts or options.listtasks
                         or options.listtags or options.syntax)
        # Listing/syntax-only invocations never execute tasks.
        self._tqm = None if wants_listing else TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=self.passwords,
            stdout_callback=stdout_callback,
        )

        # Cache ControlPersist support for the default ssh executable now,
        # pre-fork; post-fork results would be discarded after every task.
        check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def __init__(self, tid, playbooks, inventory, variable_manager, loader, options, passwords):
    """Initialise the executor; run results are streamed to ResultCallback(tid).

    :param tid: task identifier handed to the ResultCallback so results can
        be associated with this run
    """
    self._playbooks = playbooks
    self._inventory = inventory
    self._variable_manager = variable_manager
    self._loader = loader
    self._options = options
    self.passwords = passwords
    self._unreachable_hosts = dict()

    if options.listhosts or options.listtasks or options.listtags or options.syntax:
        # No tasks are executed when only listing or syntax-checking.
        self._tqm = None
    else:
        self._tqm = TaskQueueManager(inventory=inventory,
                                     variable_manager=variable_manager,
                                     loader=loader,
                                     options=options,
                                     passwords=self.passwords,
                                     stdout_callback=ResultCallback(tid))

    # Cache whether the default ssh executable supports ControlPersist,
    # pre-fork, so the result survives across tasks.
    check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def __init__(self, task_id, playbooks, inventory, variable_manager, loader, passwords):
    """Initialise the executor; results are pushed to Redis via
    RedisCallBack(task_id). Uses the global ``context.CLIARGS`` (ansible
    2.8+ style) instead of an options namespace.

    :param task_id: identifier used by RedisCallBack to key this run
    """
    self._playbooks = playbooks
    self._inventory = inventory
    self._variable_manager = variable_manager
    self._loader = loader
    self.passwords = passwords
    self._unreachable_hosts = dict()

    cli = context.CLIARGS
    listing_only = any(
        cli.get(flag) for flag in ('listhosts', 'listtasks', 'listtags', 'syntax')
    )
    if listing_only:
        # Listing/syntax modes never execute tasks.
        self._tqm = None
    else:
        self._tqm = PlayBookTaskQueueManager_V2(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            passwords=self.passwords,
            stdout_callback=RedisCallBack(task_id))

    # Cache ControlPersist support for the default ssh executable pre-fork.
    check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
    """Set up executor state and create the TaskQueueManager used to run
    plays, unless this invocation only lists hosts/tasks/tags or checks
    syntax (in which case no queue manager is needed).
    """
    # Stash the collaborators this executor needs for the whole run.
    for attr, value in (('_playbooks', playbooks),
                        ('_inventory', inventory),
                        ('_variable_manager', variable_manager),
                        ('_loader', loader),
                        ('_options', options),
                        ('passwords', passwords)):
        setattr(self, attr, value)
    self._unreachable_hosts = dict()

    if any([options.listhosts, options.listtasks, options.listtags, options.syntax]):
        self._tqm = None
    else:
        self._tqm = TaskQueueManager(inventory=inventory,
                                     variable_manager=variable_manager,
                                     loader=loader,
                                     options=options,
                                     passwords=self.passwords)

    # Note: We run this here to cache whether the default ansible ssh
    # executable supports control persist. We can't do this caching at the
    # point where it is used (in task_executor) because that is post-fork
    # and therefore would be discarded after every task.
    check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def __get__(self, obj, obj_type=None):
    """Descriptor getter that maps the special connection values 'smart'
    and 'persistent' to a concrete connection plugin name.

    'smart' resolves to 'ssh', falling back to 'paramiko' when the local
    ssh lacks ControlPersist support; 'persistent' defaults to 'paramiko'.
    """
    from ansible.module_utils.compat.paramiko import paramiko
    from ansible.utils.ssh_functions import check_for_controlpersist

    value = super().__get__(obj, obj_type)
    if value == 'smart':
        value = 'ssh'
        # check_for_controlpersist is evaluated first so its result is
        # cached even when paramiko is unavailable.
        if not check_for_controlpersist('ssh') and paramiko is not None:
            value = "paramiko"
    elif value == 'persistent' and paramiko is not None:
        # `connection: persistent` defaults to a persistent paramiko
        # connection to avoid problems.
        value = 'paramiko'
    return value
def _get_attr_connection(self):
    """Resolve the 'connection' attribute, mapping the special values
    'smart' and 'persistent' onto a concrete connection plugin name and
    writing the resolution back onto the object."""
    requested = self._attributes['connection']
    resolved = None

    if requested == 'smart':
        resolved = 'ssh'
        # Fall back to paramiko when ssh lacks ControlPersist support
        # (ControlPersist check runs first so its result is cached).
        if not check_for_controlpersist('ssh') and paramiko is not None:
            resolved = "paramiko"
    elif requested == 'persistent' and paramiko is not None:
        # `connection: persistent` defaults to a persistent paramiko
        # connection to avoid problems.
        resolved = 'paramiko'

    if resolved:
        self.connection = resolved
    return self._attributes['connection']
def _get_attr_connection(self):
    """Resolve the 'connection' attribute, turning the special values
    'smart' and 'persistent' into a concrete plugin name and storing the
    result back on the object."""
    current = self._attributes['connection']
    chosen = None

    if current == 'smart':
        if sys.platform.startswith('darwin') and self.password:
            # A current bug in sshpass on OSX can trigger a kernel panic
            # even for non-privileged users, so revert to paramiko on that
            # OS when an SSH password is specified.
            chosen = "paramiko"
        else:
            # Prefer ssh, but only when it supports ControlPersist.
            chosen = 'ssh'
            if not check_for_controlpersist(self.ssh_executable):
                chosen = "paramiko"
    elif current == 'persistent':
        # `connection: persistent` defaults to a persistent paramiko
        # connection to avoid problems.
        chosen = 'paramiko'

    if chosen:
        self.connection = chosen
    return self._attributes['connection']
def _get_attr_connection(self):
    """Connections are special: map 'smart'/'persistent' to a real plugin
    name, persist the choice on the object, and return the attribute."""
    conn_type = None
    requested = self._attributes['connection']

    if requested == 'persistent':
        # `connection: persistent` defaults to a persistent paramiko
        # connection to avoid problems.
        conn_type = 'paramiko'
    elif requested == 'smart':
        conn_type = 'ssh'
        on_macos_with_password = sys.platform.startswith('darwin') and self.password
        if on_macos_with_password:
            # sshpass on OSX can trigger a kernel panic even for
            # non-privileged users; use paramiko there when an SSH
            # password is specified.
            conn_type = "paramiko"
        elif not check_for_controlpersist(self.ssh_executable):
            # ssh without ControlPersist support -> fall back to paramiko.
            conn_type = "paramiko"

    if conn_type:
        self.connection = conn_type
    return self._attributes['connection']
def _get_connection(self, variables, templar):
    '''
    Reads the connection property for the host, and returns the
    correct connection object from the list of connection plugins.

    :param variables: task vars dict; may be mutated to strip/replace
        ansible_*_interpreter entries when delegating
    :param templar: Templar instance handed to the 'normal' action handler
        used for the accelerate fallback
    :returns: a connected (or connectable) connection plugin instance
    :raises AnsibleError: if the resolved connection plugin cannot be found
    '''
    if self._task.delegate_to is not None:
        # since we're delegating, we don't want to use interpreter values
        # which would have been set for the original target host
        for i in list(variables.keys()):
            if isinstance(i, string_types) and i.startswith(
                    'ansible_') and i.endswith('_interpreter'):
                del variables[i]
        # now replace the interpreter values with those that may have come
        # from the delegated-to host
        delegated_vars = variables.get('ansible_delegated_vars',
                                       dict()).get(self._task.delegate_to,
                                                   dict())
        if isinstance(delegated_vars, dict):
            for i in delegated_vars:
                if isinstance(i, string_types) and i.startswith(
                        "ansible_") and i.endswith("_interpreter"):
                    variables[i] = delegated_vars[i]

    conn_type = self._play_context.connection
    if conn_type == 'smart':
        conn_type = 'ssh'
        if sys.platform.startswith(
                'darwin') and self._play_context.password:
            # due to a current bug in sshpass on OSX, which can trigger
            # a kernel panic even for non-privileged users, we revert to
            # paramiko on that OS when a SSH password is specified
            conn_type = "paramiko"
        else:
            # see if SSH can support ControlPersist if not use paramiko
            if not check_for_controlpersist(
                    self._play_context.ssh_executable):
                conn_type = "paramiko"

    # if using persistent connections (or the action has set the
    # FORCE_PERSISTENT_CONNECTION attribute to True), then we use the
    # persistent connection plugin; otherwise load the requested
    # connection plugin
    if C.USE_PERSISTENT_CONNECTIONS or getattr(
            self, 'FORCE_PERSISTENT_CONNECTION',
            False) or conn_type == 'persistent':
        # if someone did `connection: persistent`, default it to using a
        # persistent paramiko connection to avoid problems
        if conn_type == 'persistent':
            self._play_context.connection = 'paramiko'
        connection = self._shared_loader_obj.connection_loader.get(
            'persistent', self._play_context, self._new_stdin)
    else:
        connection = self._shared_loader_obj.connection_loader.get(
            conn_type, self._play_context, self._new_stdin)
    if not connection:
        raise AnsibleError("the connection plugin '%s' was not found" %
                           conn_type)

    if self._play_context.accelerate:
        # accelerate is deprecated as of 2.1...
        display.deprecated(
            'Accelerated mode is deprecated. Consider using SSH with ControlPersist and pipelining enabled instead'
        )
        # launch the accelerated daemon here: keep the plain ssh connection
        # around so a 'normal' action handler can push the daemon if the
        # accelerate connection is not yet up
        ssh_connection = connection
        handler = self._shared_loader_obj.action_loader.get(
            'normal',
            task=self._task,
            connection=ssh_connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=templar,
            shared_loader_obj=self._shared_loader_obj,
        )

        key = key_for_hostname(self._play_context.remote_addr)
        accelerate_args = dict(
            password=base64.b64encode(key.__str__()),
            port=self._play_context.accelerate_port,
            minutes=C.ACCELERATE_DAEMON_TIMEOUT,
            ipv6=self._play_context.accelerate_ipv6,
            debug=self._play_context.verbosity,
        )

        connection = self._shared_loader_obj.connection_loader.get(
            'accelerate', self._play_context, self._new_stdin)
        if not connection:
            raise AnsibleError("the connection plugin '%s' was not found" %
                               conn_type)

        try:
            connection._connect()
        except AnsibleConnectionFailure:
            # daemon not running yet: push it via the ssh-backed handler,
            # then retry the accelerate connection
            display.debug('connection failed, fallback to accelerate')
            res = handler._execute_module(module_name='accelerate',
                                          module_args=accelerate_args,
                                          task_vars=variables,
                                          delete_remote_tmp=False)
            display.debug(res)
            connection._connect()

    return connection
def _run_playbook(self, yamlfile, is_checksyntax=False):
    """Load and execute a single playbook file.

    :param yamlfile: path of the playbook to run
    :param is_checksyntax: when True, only parse and post-validate the
        playbook; no tasks are executed
    :returns: True on success (or valid syntax), False on any failure
    """
    if not is_checksyntax:
        if self.options.flush_cache:
            self._flush_cache()
        self._loading_callback(yamlfile)
        tqm = TaskQueueManager(
            inventory=self.inventory,
            variable_manager=self.variable_manager,
            loader=self.loader,
            options=self.options,
            passwords=self.passwords,
            stdout_callback=self.callback,
        )
        # Cache ControlPersist support pre-fork (post-fork results are
        # discarded after every task).
        check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
    else:
        tqm = None

    try:
        pb = Playbook.load(
            yamlfile,
            variable_manager=self.variable_manager,
            loader=self.loader
        )
        self.inventory.set_playbook_basedir(yamlfile)

        if tqm is not None:
            tqm.load_callbacks()
            tqm.send_callback('v2_playbook_on_start', pb)
            self.logger.debug(self.log_prefix + '任务开始执行')

        for play in pb.get_plays():
            if play._included_path is not None:
                self.loader.set_basedir(play._included_path)
            else:
                self.loader.set_basedir(pb._basedir)

            self.inventory.remove_restriction()
            all_vars = self.variable_manager.get_vars(loader=self.loader, play=play)
            templar = Templar(loader=self.loader, variables=all_vars)

            # Validate a copy so the raw play stays untouched.
            new_play = play.copy()
            new_play.post_validate(templar)

            if is_checksyntax or tqm is None:
                # Syntax-only mode: a successfully validated play is enough.
                return True

            batches = self._get_serialized_batches(new_play)
            if len(batches) == 0:
                tqm.send_callback('v2_playbook_on_play_start', new_play)
                tqm.send_callback('v2_playbook_on_no_hosts_matched')
                self.logger.debug(self.log_prefix + '任务开始执行,但没有匹配主机')
                continue

            for batch in batches:
                self.inventory.restrict_to_hosts(batch)
                try:
                    # BUG FIX: run the post-validated copy (new_play); the
                    # batches above were computed from it, and the raw play
                    # has not been templated.
                    tqm.run(new_play)
                except Exception as e:
                    # Best-effort: keep running remaining batches.
                    print(e)

        tqm.send_callback('v2_playbook_on_stats', tqm._stats)
        self.logger.debug(self.log_prefix + '任务执行完比')
        return True
    except Exception as e:
        print(e)
        return False
    finally:
        # BUG FIX: always release the queue manager and temp files, even
        # when loading or running raises (previously leaked on failure).
        if tqm is not None:
            tqm.cleanup()
        self.loader.cleanup_all_tmp_files()
def _get_connection(self, variables, templar):
    '''
    Reads the connection property for the host, and returns the
    correct connection object from the list of connection plugins.

    :param variables: task vars dict; may be mutated to strip/replace
        ansible_*_interpreter entries when delegating
    :param templar: Templar instance handed to the 'normal' action handler
        used for the accelerate fallback
    :returns: a connected (or connectable) connection plugin instance
    :raises AnsibleError: if the resolved connection plugin cannot be found
    '''
    if self._task.delegate_to is not None:
        # since we're delegating, we don't want to use interpreter values
        # which would have been set for the original target host
        for i in list(variables.keys()):
            if isinstance(i, string_types) and i.startswith('ansible_') and i.endswith('_interpreter'):
                del variables[i]
        # now replace the interpreter values with those that may have come
        # from the delegated-to host
        delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
        if isinstance(delegated_vars, dict):
            for i in delegated_vars:
                if isinstance(i, string_types) and i.startswith("ansible_") and i.endswith("_interpreter"):
                    variables[i] = delegated_vars[i]

    conn_type = self._play_context.connection
    if conn_type == 'smart':
        conn_type = 'ssh'
        if sys.platform.startswith('darwin') and self._play_context.password:
            # due to a current bug in sshpass on OSX, which can trigger
            # a kernel panic even for non-privileged users, we revert to
            # paramiko on that OS when a SSH password is specified
            conn_type = "paramiko"
        else:
            # see if SSH can support ControlPersist if not use paramiko
            if not check_for_controlpersist(self._play_context.ssh_executable):
                conn_type = "paramiko"

    # if using persistent connections (or the action has set the
    # FORCE_PERSISTENT_CONNECTION attribute to True), then we use the
    # persistent connection plugin; otherwise load the requested
    # connection plugin
    if C.USE_PERSISTENT_CONNECTIONS or getattr(self, 'FORCE_PERSISTENT_CONNECTION', False) or conn_type == 'persistent':
        # if someone did `connection: persistent`, default it to using a
        # persistent paramiko connection to avoid problems
        if conn_type == 'persistent':
            self._play_context.connection = 'paramiko'
        connection = self._shared_loader_obj.connection_loader.get('persistent', self._play_context, self._new_stdin)
    else:
        connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin)
    if not connection:
        raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

    if self._play_context.accelerate:
        # accelerate is deprecated as of 2.1...
        display.deprecated('Accelerated mode is deprecated. Consider using SSH with ControlPersist and pipelining enabled instead')
        # launch the accelerated daemon here: keep the plain ssh connection
        # around so a 'normal' action handler can push the daemon if the
        # accelerate connection is not yet up
        ssh_connection = connection
        handler = self._shared_loader_obj.action_loader.get(
            'normal',
            task=self._task,
            connection=ssh_connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=templar,
            shared_loader_obj=self._shared_loader_obj,
        )

        key = key_for_hostname(self._play_context.remote_addr)
        accelerate_args = dict(
            password=base64.b64encode(key.__str__()),
            port=self._play_context.accelerate_port,
            minutes=C.ACCELERATE_DAEMON_TIMEOUT,
            ipv6=self._play_context.accelerate_ipv6,
            debug=self._play_context.verbosity,
        )

        connection = self._shared_loader_obj.connection_loader.get('accelerate', self._play_context, self._new_stdin)
        if not connection:
            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

        try:
            connection._connect()
        except AnsibleConnectionFailure:
            # daemon not running yet: push it via the ssh-backed handler,
            # then retry the accelerate connection
            display.debug('connection failed, fallback to accelerate')
            res = handler._execute_module(module_name='accelerate', module_args=accelerate_args, task_vars=variables, delete_remote_tmp=False)
            display.debug(res)
            connection._connect()

    return connection
def run_playbook(self, playbook, workname, work_uuid, username, describe, yaml_content, is_checksyntax=False):
    """Run a single playbook file through a TaskQueueManager whose output
    is collected by a JsonCallback.

    :param playbook: path to the playbook file
    :param workname: human-readable job name (recorded by the callback)
    :param work_uuid: unique job id (recorded by the callback)
    :param username: user who launched the job
    :param describe: job description
    :param yaml_content: raw playbook text recorded by the callback
    :param is_checksyntax: when True, only parse/validate the playbook
    :returns: True on success (or valid syntax), False otherwise
    """
    if is_checksyntax:
        self._tqm = None
    else:
        # JsonCallback keys the recorded results by work_uuid.
        self.callback = JsonCallback(work_uuid, workname, 'playbook', self.options, describe,
                                     yaml_content=yaml_content, username=username)
        self._tqm = TaskQueueManager(inventory=self.inventory,
                                     variable_manager=self.variable_manager,
                                     loader=self.loader,
                                     options=self.options,
                                     passwords=self.passwords,
                                     stdout_callback=self.callback)
        # Cache ControlPersist support pre-fork.
        check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)

    try:
        pb = Playbook.load(playbook, variable_manager=self.variable_manager, loader=self.loader)
        if self._tqm is not None:
            self._tqm.load_callbacks()
            self._tqm.send_callback('v2_playbook_on_start', pb)

        for play in pb.get_plays():
            if play._included_path is not None:
                self.loader.set_basedir(play._included_path)
            else:
                self.loader.set_basedir(pb._basedir)

            self.inventory.remove_restriction()
            # Allow variables to be used in vars_prompt fields.
            all_vars = self.variable_manager.get_vars(play=play)
            templar = Templar(loader=self.loader, variables=all_vars)
            play.post_validate(templar)

            if is_checksyntax or self._tqm is None:
                # Syntax-only mode: a successfully validated play suffices.
                return True

            batches = self._get_serialized_batches(play)
            if len(batches) == 0:
                self._tqm.send_callback('v2_playbook_on_play_start', play)
                self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                continue

            for batch in batches:
                self.inventory.restrict_to_hosts(batch)
                try:
                    self._tqm.run(play)
                except Exception as e:
                    # Best-effort: keep running remaining batches.
                    print(e)

        self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
        return True
    except Exception as e:
        print(e)
        return False
    finally:
        # BUG FIX: always release workers and temp files, even when loading
        # or running raises (previously leaked on failure paths).
        if self._tqm is not None:
            self._tqm.cleanup()
        self.loader.cleanup_all_tmp_files()