def _close(self, abort, timeout):
    """Close client. See EngineClient._close()."""
    if abort:
        # Safer to poll() first: for long-completed processes kill()
        # could fail, so only kill when the process is still running.
        if self.popen.poll() is None:
            try:
                self.popen.kill()
            except OSError:
                # best effort: the process may have just exited
                pass
    rc = self.popen.wait()
    if rc > 0:
        raise WorkerError("Cannot run pdsh (error %d)" % rc)
    self.streams.clear()
    if timeout:
        assert abort, "abort flag not set on timeout"
    # report every node that has not completed yet: on timeout notify
    # the worker's timeout handler, otherwise assume return code 0
    for node in (self.key - self._closed_nodes):
        if timeout:
            self.worker._on_node_timeout(node)
        else:
            self.worker._on_node_rc(node, 0)
    self.worker._check_fini()
def _replace_cmd(pattern, node, rank): """ Replace keywords in `pattern' with value from `node' and `rank'. %h, %host map `node' %n, %rank map `rank' """ variables = { 'h': node, 'host': node, 'hosts': node, 'n': rank or 0, 'rank': rank or 0, # 'u': None, } class Replacer(Template): delimiter = '%' try: cmd = Replacer(pattern).substitute(variables) except (KeyError, ValueError) as error: msg = "%s is not a valid pattern, use '%%%%' to escape '%%'" % error raise WorkerError(msg) return cmd
def _close(self, abort, flush, timeout):
    """
    Close client. See EngineClient._close().

    Note: `flush' is accepted for interface compatibility but is not
    used by this implementation.
    """
    if abort:
        # process still running? then kill it before waiting
        if self.popen.poll() is None:
            self.popen.kill()
    prc = self.popen.wait()
    # negative codes mean "terminated by signal" and are not treated
    # as pdsh errors here
    if prc >= 0 and prc != 0:
        raise WorkerError("Cannot run pdsh (error %d)" % prc)
    if abort and timeout:
        if self.eh:
            self.eh.ev_timeout(self)
    # release I/O descriptors: reader is always open, error/writer
    # descriptors are optional
    os.close(self.fd_reader)
    self.fd_reader = None
    for fdattr in ('fd_error', 'fd_writer'):
        fdesc = getattr(self, fdattr)
        if fdesc:
            os.close(fdesc)
            setattr(self, fdattr, None)
    if timeout:
        assert abort, "abort flag not set on timeout"
    # notify remaining nodes: timeout event or implicit rc 0
    for node in (self.nodes - self.closed_nodes):
        if timeout:
            self._on_node_timeout(node)
        else:
            self._on_node_rc(node, 0)
    if self.eh:
        self.eh.ev_close(self)
def _launch(self, nodes):
    """
    Launch the tree worker on `nodes'.

    Targets reachable directly (gw == targets) get a local child
    worker; other targets are dispatched through their gateway. In
    copy mode (self.source set), a tar archive of the source is built
    and streamed through write() once the workers are initialized.
    """
    self.logger.debug("WorkerTree._launch on %s (fanout=%d)", nodes,
                      self.task.info("fanout"))

    # Prepare copy params if source is defined
    destdir = None
    if self.source:
        self.logger.debug("copy self.dest=%s", self.dest)
        # Special processing to determine best arcname and destdir for tar.
        # The only case that we don't support is when source is a file and
        # dest is a dir without a finishing / (in that case we cannot
        # determine remotely whether it is a file or a directory).
        if isfile(self.source):
            # dest is not normalized here
            arcname = basename(self.dest) or basename(normpath(self.source))
            destdir = dirname(self.dest)
        else:
            arcname = basename(normpath(self.source))
            destdir = os.path.normpath(self.dest)
        self.logger.debug("copy arcname=%s destdir=%s", arcname, destdir)

    # And launch stuffs
    next_hops = self._distribute(self.task.info("fanout"), nodes.copy())
    # lazy %-args, consistent with the other logger.debug calls
    self.logger.debug("next_hops=%s",
                      [(str(n), str(v)) for n, v in next_hops.items()])
    for gw, targets in next_hops.iteritems():
        if gw == targets:
            self.logger.debug('task.shell cmd=%s source=%s nodes=%s '
                              'timeout=%s remote=%s', self.command,
                              self.source, nodes, self.timeout, self.remote)
            self._child_count += 1
            self._target_count += len(targets)
            if self.remote:
                if self.source:
                    # direct copy: switch command to the remote untar
                    self.logger.debug('_launch remote untar (destdir=%s)',
                                      destdir)
                    self.command = self.UNTAR_CMD_FMT % destdir
                # same shell invocation for both cases (command was
                # switched to the untar command just above if needed)
                worker = self.task.shell(self.command,
                                         nodes=targets,
                                         timeout=self.timeout,
                                         handler=self.metahandler,
                                         stderr=self.stderr,
                                         tree=False)
            else:
                assert self.source is None
                worker = ExecWorker(nodes=targets,
                                    command=self.command,
                                    handler=self.metahandler,
                                    timeout=self.timeout,
                                    stderr=self.stderr)
            self.task.schedule(worker)
            self.workers.append(worker)
            self.logger.debug("added child worker %s count=%d", worker,
                              len(self.workers))
        else:
            self.logger.debug("trying gateway %s to reach %s", gw, targets)
            if self.source:
                self._copy_remote(self.source, destdir, targets, gw,
                                  self.timeout)
            else:
                self._execute_remote(self.command, targets, gw,
                                     self.timeout)

    # Copy mode: send tar data after above workers have been initialized
    if self.source:
        try:
            # create temporary tar file with all source files
            tmptar = tempfile.TemporaryFile()
            tar = tarfile.open(fileobj=tmptar, mode='w:')
            tar.add(self.source, arcname=arcname)
            tar.close()
            tmptar.flush()
            # read generated tar file and send to worker
            tmptar.seek(0)
            rbuf = tmptar.read(32768)
            while len(rbuf) > 0:
                self.write(rbuf)
                rbuf = tmptar.read(32768)
        except OSError as exc:
            # 'as' syntax (Py2.6+), consistent with the rest of the file
            raise WorkerError(exc)
def _launch(self, nodes):
    """
    Launch the tree worker on `nodes' (copy and reverse-copy aware).

    Targets reachable directly (gw == targets) get a local child
    worker; other targets are dispatched through their gateway. In
    forward copy mode (source set, not reverse), a tar archive of the
    source is built and streamed to remote targets afterwards.
    """
    self.logger.debug("WorkerTree._launch on %s (fanout=%d)", nodes,
                      self.task.info("fanout"))

    # Prepare copy params if source is defined
    destdir = None
    if self.source:
        if self.reverse:
            self.logger.debug("rcopy source=%s, dest=%s", self.source,
                              self.dest)
            # dest is a directory
            destdir = self.dest
        else:
            self.logger.debug("copy source=%s, dest=%s", self.source,
                              self.dest)
            # Special processing to determine best arcname and destdir for
            # tar. The only case that we don't support is when source is a
            # file and dest is a dir without a finishing / (in that case we
            # cannot determine remotely whether it is a file or a
            # directory).
            if isfile(self.source):
                # dest is not normalized here
                arcname = basename(self.dest) or \
                    basename(normpath(self.source))
                destdir = dirname(self.dest)
            else:
                # source is a directory: if dest has a trailing slash
                # like in /tmp/ then arcname is basename(source),
                # but if dest is /tmp/newname (no trailing slash) then
                # arcname becomes newname.
                # endswith() also avoids IndexError on an empty dest
                if self.dest.endswith('/'):
                    arcname = basename(self.source)
                else:
                    arcname = basename(self.dest)
                # dirname has not the same behavior when a trailing
                # slash is present, and we want that.
                destdir = dirname(self.dest)
            self.logger.debug("copy arcname=%s destdir=%s", arcname,
                              destdir)

    # And launch stuffs
    next_hops = self._distribute(self.task.info("fanout"), nodes.copy())
    # lazy %-args, consistent with the other logger.debug calls
    self.logger.debug("next_hops=%s",
                      [(str(n), str(v)) for n, v in next_hops.items()])
    for gw, targets in next_hops.iteritems():
        if gw == targets:
            self.logger.debug('task.shell cmd=%s source=%s nodes=%s '
                              'timeout=%s remote=%s', self.command,
                              self.source, nodes, self.timeout, self.remote)
            self._child_count += 1
            self._target_count += len(targets)
            if self.remote:
                if self.source:
                    # Note: specific case where targets are not in topology
                    # as self.source is never used on remote gateways
                    # so we try a direct copy/rcopy:
                    self.logger.debug('_launch copy r=%s source=%s dest=%s',
                                      self.reverse, self.source, self.dest)
                    worker = self.task.copy(self.source, self.dest, targets,
                                            handler=self.metahandler,
                                            stderr=self.stderr,
                                            timeout=self.timeout,
                                            preserve=self.preserve,
                                            reverse=self.reverse,
                                            tree=False)
                else:
                    worker = self.task.shell(self.command,
                                             nodes=targets,
                                             timeout=self.timeout,
                                             handler=self.metahandler,
                                             stderr=self.stderr,
                                             tree=False)
            else:
                assert self.source is None
                worker = ExecWorker(nodes=targets,
                                    command=self.command,
                                    handler=self.metahandler,
                                    timeout=self.timeout,
                                    stderr=self.stderr)
            self.task.schedule(worker)
            self.workers.append(worker)
            self.logger.debug("added child worker %s count=%d", worker,
                              len(self.workers))
        else:
            self.logger.debug("trying gateway %s to reach %s", gw, targets)
            if self.source:
                self._copy_remote(self.source, destdir, targets, gw,
                                  self.timeout, self.reverse)
            else:
                self._execute_remote(self.command, targets, gw,
                                     self.timeout)

    # Copy mode: send tar data after above workers have been initialized
    if self.source and not self.reverse:
        try:
            # create temporary tar file with all source files
            tmptar = tempfile.TemporaryFile()
            tar = tarfile.open(fileobj=tmptar, mode='w:')
            tar.add(self.source, arcname=arcname)
            tar.close()
            tmptar.flush()
            # read generated tar file
            tmptar.seek(0)
            rbuf = tmptar.read(32768)
            # send tar data to remote targets only
            while len(rbuf) > 0:
                self._write_remote(rbuf)
                rbuf = tmptar.read(32768)
        except OSError as exc:
            raise WorkerError(exc)