def _on_remote_node_close(self, node, rc, gateway):
    """remote node closing with return code"""
    # Propagate close (with rc) to the common distant-worker handling.
    DistantWorker._on_node_close(self, node, rc)
    self.logger.debug("_on_remote_node_close %s %s via gw %s", node,
                      self._close_count, gateway)
    # finalize rcopy: extract tar data
    if self.source and self.reverse:
        for bnode, buf in self._rcopy_bufs.items():
            tarfileobj = self._rcopy_tars[bnode]
            if len(buf) > 0:
                # flush any remaining buffered (already decoded) bytes
                self.logger.debug("flushing node %s buf %d bytes", bnode,
                                  len(buf))
                tarfileobj.write(buf)
            # rewind the temporary file before reading it back as a tar
            tarfileobj.flush()
            tarfileobj.seek(0)
            tmptar = tarfile.open(fileobj=tarfileobj)
            try:
                self.logger.debug("%s extracting %d members in dest %s",
                                  bnode, len(tmptar.getmembers()), self.dest)
                tmptar.extractall(path=self.dest)
            except IOError as ex:
                # report extraction failure as stderr output for that node
                self._on_remote_node_msgline(bnode, ex, 'stderr', gateway)
            finally:
                tmptar.close()
        # all per-node buffers consumed
        self._rcopy_bufs = {}
        self._rcopy_tars = {}
    # target done: update this gateway's selection and check for completion
    self.gwtargets[str(gateway)].remove(node)
    self._close_count += 1
    self._check_fini(gateway)
def __init__(self, nodes, handler, timeout, **kwargs):
    """
    Initialize Ssh worker instance.
    """
    DistantWorker.__init__(self, handler)

    self._close_count = 0
    self._has_timeout = False
    self.clients = []
    self.nodes = NodeSet(nodes)
    self.command = kwargs.get('command')
    self.source = kwargs.get('source')
    self.dest = kwargs.get('dest')

    # either a command (remote shell) or a source (copy) is mandatory
    if self.command is None and not self.source:
        raise ValueError("missing command or source parameter in " \
                         "WorkerSsh constructor")

    autoclose = kwargs.get('autoclose', False)
    stderr = kwargs.get('stderr', False)

    # Prepare underlying engine clients (ssh/scp processes)
    if self.command is not None:
        # secure remote shell: one Ssh client per target node
        self.clients = [Ssh(node, self.command, self, stderr, timeout,
                            autoclose)
                        for node in self.nodes]
    else:
        # secure copy: one Scp client per target node
        preserve = kwargs.get('preserve', False)
        reverse = kwargs.get('reverse', False)
        self.clients = [Scp(node, self.source, self.dest, self, stderr,
                            timeout, preserve, reverse)
                        for node in self.nodes]
def _on_remote_node_rc(self, node, rc, gateway):
    """Handle a return code received for a node reached through a gateway."""
    DistantWorker._on_node_rc(self, node, rc)
    self.logger.debug("_on_remote_node_rc %s %s via gw %s", node,
                      self._close_count, gateway)
    # drop the node from this gateway's active target selection
    targets = self.gwtargets[gateway]
    targets.remove(node)
    self._close_count += 1
    # may trigger gateway teardown once all of its targets are done
    self._check_fini(gateway)
def _on_remote_node_timeout(self, node, gateway):
    """Handle a timeout notification for a node reached through a gateway."""
    DistantWorker._on_node_timeout(self, node)
    self.logger.debug("_on_remote_node_timeout %s via gw %s", node, gateway)
    # remember at least one target timed out, and count it as closed
    self._has_timeout = True
    self._close_count += 1
    self.gwtargets[gateway].remove(node)
    self._check_fini(gateway)
def _set_task(self, task):
    """
    Bind worker to task. Called by task.schedule().
    WorkerTree metaworker: override to schedule sub-workers.
    """
    DistantWorker._set_task(self, task)
    # Now bound to task - initialize router
    self.topology = self.topology or task.topology
    self.router = self.router or task._default_router()
    # And launch stuffs: split targets between direct connections and
    # gateway-routed hops
    next_hops = self._distribute(self.task.info("fanout"), self.nodes)
    # items() instead of py2-only iteritems() (consistent with the
    # .items() usage elsewhere in this file; works on py2 and py3)
    for gw, targets in next_hops.items():
        if gw == targets:
            # directly connected targets: spawn a regular shell sub-worker.
            # Use lazy %s logging args: timeout may be None, which made the
            # original eager '%d' formatting raise TypeError.
            self.logger.debug('task.shell cmd=%s nodes=%s timeout=%s',
                              self.command, self.nodes, self.timeout)
            self._child_count += 1
            self._target_count += len(targets)
            self.workers.append(self.task.shell(self.command,
                                                nodes=targets,
                                                timeout=self.timeout,
                                                handler=self.metahandler,
                                                stderr=self.stderr,
                                                tree=False))
        else:
            # remote targets: propagate the command through gateway 'gw'
            self._execute_remote(self.command, targets, gw, self.timeout)
def __init__(self, nodes, handler, timeout, **kwargs):
    """
    Initialize Tree worker instance.

    @param nodes: Targeted nodeset.
    @param handler: Worker EventHandler.
    @param timeout: Timeout value for worker.
    @param command: Command to execute.
    @param topology: Force specific TopologyTree.
    @param newroot: Root node of TopologyTree.
    """
    DistantWorker.__init__(self, handler)

    self.workers = []
    self.nodes = NodeSet(nodes)
    self.timeout = timeout
    self.command = kwargs.get('command')
    self.source = kwargs.get('source')
    self.dest = kwargs.get('dest')
    autoclose = kwargs.get('autoclose', False)  # NOTE(review): read but never used here
    self.stderr = kwargs.get('stderr', False)
    # progress counters used by the meta-handler callbacks
    self._close_count = 0
    self._start_count = 0
    self._child_count = 0
    self._target_count = 0
    self._has_timeout = False
    self.logger = logging.getLogger(__name__)

    if self.command is not None:
        pass
    elif self.source:
        # file copy through the tree is not supported by this revision
        raise NotImplementedError
    else:
        raise ValueError("missing command or source parameter in " \
                         "WorkerTree constructor")

    # build gateway invocation command
    invoke_gw_args = []
    for envname in ('PYTHONPATH', \
                    'CLUSTERSHELL_GW_LOG_DIR', \
                    'CLUSTERSHELL_GW_LOG_LEVEL'):
        envval = os.getenv(envname)
        if envval:
            invoke_gw_args.append("%s=%s" % (envname, envval))
    # NOTE(review): 'python -m' expects a dotted module name; the slash in
    # 'ClusterShell/Gateway' looks wrong (a later revision of this code uses
    # 'ClusterShell.Gateway') -- confirm before relying on gateway spawning.
    invoke_gw_args.append("python -m ClusterShell/Gateway -Bu")
    self.invoke_gateway = ' '.join(invoke_gw_args)

    self.topology = kwargs.get('topology')
    if self.topology is not None:
        # newroot defaults to the topology tree root nodeset
        self.newroot = kwargs.get('newroot') or str(
            self.topology.root.nodeset)
        self.router = PropagationTreeRouter(self.newroot, self.topology)
    else:
        self.router = None

    self.upchannel = None
    self.metahandler = MetaWorkerEventHandler(self)
    # gateway -> targets selection
    self.gwtargets = {}
def _on_remote_node_rc(self, node, rc, gateway):
    """remote rc received"""
    DistantWorker._on_node_rc(self, node, rc)
    self.logger.debug("_on_remote_node_rc %s %s via gw %s", node,
                      self._close_count, gateway)
    # finalize rcopy: extract tar data
    if self.source and self.reverse:
        # Use a distinct loop variable: the original reused 'node', clobbering
        # the method parameter, so the wrong node could later be removed from
        # self.gwtargets[gateway].
        for bnode, buf in self._rcopy_bufs.items():
            tarfileobj = self._rcopy_tars[bnode]
            if len(buf) > 0:
                # flush remaining buffered bytes into the temporary tar file
                self.logger.debug("flushing node %s buf %d bytes", bnode,
                                  len(buf))
                tarfileobj.write(buf)
            tarfileobj.flush()
            tarfileobj.seek(0)
            # Bind tmptar BEFORE entering try/finally: the original wrapped
            # tarfile.open() inside the try, so a failing open() led to an
            # unbound 'tmptar' NameError in the finally clause.
            tmptar = tarfile.open(fileobj=tarfileobj)
            try:
                self.logger.debug("%s extracting %d members in dest %s",
                                  bnode, len(tmptar.getmembers()), self.dest)
                tmptar.extractall(path=self.dest)
            except IOError as ex:
                # report extraction failure as stderr output for that node
                self._on_remote_node_msgline(bnode, ex, 'stderr', gateway)
            finally:
                tmptar.close()
        self._rcopy_bufs = {}
        self._rcopy_tars = {}
    # target done: update this gateway's selection and check for completion
    self.gwtargets[gateway].remove(node)
    self._close_count += 1
    self._check_fini(gateway)
def __init__(self, nodes, handler, timeout, **kwargs):
    """
    Initialize Tree worker instance.

    @param nodes: Targeted nodeset.
    @param handler: Worker EventHandler.
    @param timeout: Timeout value for worker.
    @param command: Command to execute.
    @param topology: Force specific TopologyTree.
    @param newroot: Root node of TopologyTree.
    """
    DistantWorker.__init__(self, handler)

    self.workers = []
    self.nodes = NodeSet(nodes)
    self.timeout = timeout
    self.command = kwargs.get('command')
    self.source = kwargs.get('source')
    self.dest = kwargs.get('dest')
    autoclose = kwargs.get('autoclose', False)  # NOTE(review): read but never used here
    self.stderr = kwargs.get('stderr', False)
    # run the command locally on targets instead of remotely when False
    self.remote = kwargs.get('remote', True)
    # progress counters used by the meta-handler callbacks
    self._close_count = 0
    self._start_count = 0
    self._child_count = 0
    self._target_count = 0
    self._has_timeout = False
    self.logger = logging.getLogger(__name__)

    if self.command is None and self.source is None:
        raise ValueError("missing command or source parameter in "
                         "WorkerTree constructor")

    # build gateway invocation command
    invoke_gw_args = []
    for envname in ('PYTHONPATH', \
                    'CLUSTERSHELL_GW_LOG_DIR', \
                    'CLUSTERSHELL_GW_LOG_LEVEL'):
        envval = os.getenv(envname)
        if envval:
            invoke_gw_args.append("%s=%s" % (envname, envval))
    # NOTE(review): 'python -m' expects a dotted module name; the slash in
    # 'ClusterShell/Gateway' looks wrong (a later revision of this code uses
    # 'ClusterShell.Gateway') -- confirm before relying on gateway spawning.
    invoke_gw_args.append("python -m ClusterShell/Gateway -Bu")
    self.invoke_gateway = ' '.join(invoke_gw_args)

    self.topology = kwargs.get('topology')
    if self.topology is not None:
        # newroot defaults to the topology tree root nodeset
        self.newroot = kwargs.get('newroot') or str(self.topology.root.nodeset)
        self.router = PropagationTreeRouter(self.newroot, self.topology)
    else:
        self.router = None

    self.upchannel = None
    self.metahandler = MetaWorkerEventHandler(self)
    # gateway -> active targets selection
    self.gwtargets = {}
def __init__(self, nodes, handler, timeout=None, **kwargs):
    """Create an ExecWorker and its engine client instances."""
    DistantWorker.__init__(self, handler)

    # internal bookkeeping
    self._close_count = 0
    self._has_timeout = False
    self._clients = []

    # targets and optional command/copy parameters
    self.nodes = NodeSet(nodes)
    for optname in ('command', 'source', 'dest'):
        setattr(self, optname, kwargs.get(optname))

    # instantiate the underlying engine clients
    self._create_clients(timeout=timeout, **kwargs)
def _set_task(self, task):
    """
    Bind worker to task. Called by task.schedule().
    WorkerTree metaworker: override to schedule sub-workers.
    """
    DistantWorker._set_task(self, task)
    # Now bound to a task: fall back to the task's topology and router
    # when none was forced at construction time.
    if not self.topology:
        self.topology = task.topology
    if not self.router:
        self.router = task._default_router()
    # launch sub-workers for all targets
    self._launch(self.nodes)
def _set_task(self, task):
    """
    Bind worker to task. Called by task.schedule().
    WorkerTree metaworker: override to schedule sub-workers.
    """
    DistantWorker._set_task(self, task)
    # Now bound to a task: fall back to the task's topology and router
    # when none was forced at construction time.
    if not self.topology:
        self.topology = task.topology
    if not self.router:
        self.router = task._default_router()
    # launch sub-workers for all targets, then verify startup conditions
    self._launch(self.nodes)
    self._check_ini()
def _on_remote_node_msgline(self, node, msg, sname, gateway): """remote msg received""" if not self.source or not self.reverse or sname != 'stdout': DistantWorker._on_node_msgline(self, node, msg, sname) return # rcopy only: we expect base64 encoded tar content on stdout encoded = self._rcopy_bufs.setdefault(node, '') + msg if node not in self._rcopy_tars: self._rcopy_tars[node] = tempfile.TemporaryFile() # partial base64 decoding requires a multiple of 4 characters encoded_sz = (len(encoded) // 4) * 4 # write decoded binary msg to node temporary tarfile self._rcopy_tars[node].write(base64.b64decode(encoded[0:encoded_sz])) # keep trailing encoded chars for next time self._rcopy_bufs[node] = encoded[encoded_sz:]
def __init__(self, nodes, handler, timeout, **kwargs):
    """
    Initialize Pdsh worker instance.
    """
    DistantWorker.__init__(self, handler)

    self.nodes = NodeSet(nodes)
    # nodes whose rc has been received so far
    self.closed_nodes = NodeSet()

    self.command = kwargs.get('command')
    self.source = kwargs.get('source')
    self.dest = kwargs.get('dest')
    autoclose = kwargs.get('autoclose', False)
    stderr = kwargs.get('stderr', False)
    # this worker is its own engine client (a single pdsh/pdcp process
    # handles all targets)
    EngineClient.__init__(self, self, stderr, timeout, autoclose)

    if self.command is not None:
        # PDSH
        self.source = None
        self.dest = None
        self.mode = 'pdsh'
    elif self.source:
        # PDCP
        self.command = None
        self.mode = 'pdcp'
        # Preserve modification times and modes?
        self.preserve = kwargs.get('preserve', False)
        # Reverse copy (rpdcp)?
        self.reverse = kwargs.get('reverse', False)
        if self.reverse:
            # reverse copy gathers files from all nodes, so the local
            # destination must be a directory
            self.isdir = os.path.isdir(self.dest)
            if not self.isdir:
                raise ValueError("reverse copy dest must be a directory")
        else:
            self.isdir = os.path.isdir(self.source)
    else:
        raise ValueError("missing command or source parameter in " \
                         "WorkerPdsh constructor")

    # underlying process handle and partial-line buffer
    self.popen = None
    self._buf = ""
def _on_node_timeout(self, node):
    """Handle a node timeout: notify base class, then flag the worker."""
    # base class notification first (may fire user event handlers)
    DistantWorker._on_node_timeout(self, node)
    # remember that at least one node timed out
    self._has_timeout = True
def _on_node_timeout(self, node):
    """Register a node timeout and count the client as closed."""
    DistantWorker._on_node_timeout(self, node)
    # remember that at least one node timed out, and count it as closed
    self._has_timeout = True
    self._close_count += 1
def _on_node_rc(self, node, rc):
    """
    Return code received from a node, update last* stuffs.
    """
    # base class bookkeeping first (may fire user event handlers)
    DistantWorker._on_node_rc(self, node, rc)
    # track which nodes have delivered their return code
    self.closed_nodes.add(node)
def _on_remote_node_msgline(self, node, msg, sname, gateway):
    """Remote message line received: delegate to common distant-worker
    handling (the gateway argument is not needed here)."""
    DistantWorker._on_node_msgline(self, node, msg, sname)
def _on_node_rc(self, node, rc):
    """Return code received from a node."""
    DistantWorker._on_node_rc(self, node, rc)
    # log the pre-increment close count, then account for this client
    closed_so_far = self._close_count
    self.logger.debug("_on_node_rc %s %s (%s)", node, rc, closed_so_far)
    self._close_count = closed_so_far + 1
def _on_node_rc(self, node, rc):
    """Return code received from a node: count one more closed client."""
    DistantWorker._on_node_rc(self, node, rc)
    self._close_count = self._close_count + 1
def __init__(self, nodes, handler, timeout, **kwargs):
    """
    Initialize Tree worker instance.

    :param nodes: Targeted nodeset.
    :param handler: Worker EventHandler.
    :param timeout: Timeout value for worker.
    :param command: Command to execute.
    :param topology: Force specific TopologyTree.
    :param newroot: Root node of TopologyTree.
    """
    DistantWorker.__init__(self, handler)
    self.logger = logging.getLogger(__name__)

    self.workers = []
    self.nodes = NodeSet(nodes)
    self.timeout = timeout
    self.command = kwargs.get('command')
    self.source = kwargs.get('source')
    self.dest = kwargs.get('dest')
    autoclose = kwargs.get('autoclose', False)  # NOTE(review): read but never used here
    self.stderr = kwargs.get('stderr', False)
    self.logger.debug("stderr=%s", self.stderr)
    # run the command locally on targets instead of remotely when False
    self.remote = kwargs.get('remote', True)
    self.preserve = kwargs.get('preserve', None)
    self.reverse = kwargs.get('reverse', False)
    # reverse-copy state: node -> pending base64 chars / temporary tar file
    self._rcopy_bufs = {}
    self._rcopy_tars = {}
    # progress counters used by the meta-handler callbacks
    self._close_count = 0
    self._start_count = 0
    self._child_count = 0
    self._target_count = 0
    self._has_timeout = False

    if self.command is None and self.source is None:
        raise ValueError("missing command or source parameter in "
                         "WorkerTree constructor")

    # rcopy is enforcing separated stderr to handle tar error messages
    # because stdout is used for data transfer
    if self.source and self.reverse:
        self.stderr = True

    # build gateway invocation command
    invoke_gw_args = []
    for envname in ('PYTHONPATH',
                    'CLUSTERSHELL_GW_LOG_DIR',
                    'CLUSTERSHELL_GW_LOG_LEVEL',
                    'CLUSTERSHELL_GW_B64_LINE_LENGTH'):
        envval = os.getenv(envname)
        if envval:
            invoke_gw_args.append("%s=%s" % (envname, envval))
    # NOTE(review): 'python -m' expects a dotted module name; the slash in
    # 'ClusterShell/Gateway' looks wrong (a later revision of this code uses
    # 'ClusterShell.Gateway') -- confirm before relying on gateway spawning.
    invoke_gw_args.append("python -m ClusterShell/Gateway -Bu")
    self.invoke_gateway = ' '.join(invoke_gw_args)

    self.topology = kwargs.get('topology')
    if self.topology is not None:
        # newroot defaults to the topology tree root nodeset
        self.newroot = kwargs.get('newroot') or \
            str(self.topology.root.nodeset)
        self.router = PropagationTreeRouter(self.newroot, self.topology)
    else:
        self.router = None

    self.upchannel = None
    self.metahandler = MetaWorkerEventHandler(self)
    # gateway -> active targets selection
    self.gwtargets = {}
def _on_node_rc(self, node, rc):
    """Return code received: count the close and check for completion."""
    DistantWorker._on_node_rc(self, node, rc)
    self._close_count = self._close_count + 1
    # may finish the worker when all clients are closed
    self._check_fini()
def __init__(self, nodes, handler, timeout, **kwargs):
    """
    Initialize Tree worker instance.

    :param nodes: Targeted nodeset.
    :param handler: Worker EventHandler.
    :param timeout: Timeout value for worker.
    :param command: Command to execute.
    :param topology: Force specific TopologyTree.
    :param newroot: Root node of TopologyTree.
    """
    DistantWorker.__init__(self, handler)
    self.logger = logging.getLogger(__name__)

    self.workers = []
    self.nodes = NodeSet(nodes)
    self.timeout = timeout
    self.command = kwargs.get('command')
    self.source = kwargs.get('source')
    self.dest = kwargs.get('dest')
    autoclose = kwargs.get('autoclose', False)  # NOTE(review): read but never used here
    self.stderr = kwargs.get('stderr', False)
    self.logger.debug("stderr=%s", self.stderr)
    # run the command locally on targets instead of remotely when False
    self.remote = kwargs.get('remote', True)
    self.preserve = kwargs.get('preserve', None)
    self.reverse = kwargs.get('reverse', False)
    # reverse-copy state: node -> pending base64 chars / temporary tar file
    self._rcopy_bufs = {}
    self._rcopy_tars = {}
    # progress counters used by the meta-handler callbacks
    self._close_count = 0
    self._start_count = 0
    self._child_count = 0
    self._target_count = 0
    self._has_timeout = False

    if self.command is None and self.source is None:
        raise ValueError("missing command or source parameter in "
                         "TreeWorker constructor")

    # rcopy is enforcing separated stderr to handle tar error messages
    # because stdout is used for data transfer
    if self.source and self.reverse:
        self.stderr = True

    # build gateway invocation command
    invoke_gw_args = []
    for envname in ('PYTHONPATH',
                    'CLUSTERSHELL_GW_LOG_DIR',
                    'CLUSTERSHELL_GW_LOG_LEVEL',
                    'CLUSTERSHELL_GW_B64_LINE_LENGTH'):
        envval = os.getenv(envname)
        if envval:
            invoke_gw_args.append("%s=%s" % (envname, envval))

    # It is critical to launch a remote Python executable with the same
    # major version (ie. python or python3) as we use the (default) pickle
    # protocol and for example, version 3+ (Python 3 with bytes
    # support) cannot be unpickled by Python 2.
    python_executable = os.getenv('CLUSTERSHELL_GW_PYTHON_EXECUTABLE',
                                  basename(sys.executable or 'python'))
    invoke_gw_args.append(python_executable)
    invoke_gw_args.extend(['-m', 'ClusterShell.Gateway', '-Bu'])
    self.invoke_gateway = ' '.join(invoke_gw_args)

    self.topology = kwargs.get('topology')
    if self.topology is not None:
        # newroot defaults to the topology tree root nodeset
        self.newroot = kwargs.get('newroot') or \
            str(self.topology.root.nodeset)
        self.router = PropagationTreeRouter(self.newroot, self.topology)
    else:
        self.router = None

    self.upchannel = None
    self.metahandler = MetaWorkerEventHandler(self)
    # gateway (string) -> active targets selection
    self.gwtargets = {}