Code example #1
File: TaskLocalMixin.py  Project: samkos/clustershell
    def test_mixed_worker_retcodes(self):
        """test Task retcode handling with mixed workers"""

        # This test case failed with CS <= 1.7.3
        # Conditions: task.max_retcode() set during runtime (not None)
        # and then a StreamWorker closing, thus calling Task._set_rc(rc=None)
        # To reproduce, we start a StreamWorker on first read of an ExecWorker.

        class TestH(EventHandler):
            def __init__(self, worker2):
                self.worker2 = worker2

            def ev_read(self, worker):
                worker.task.schedule(self.worker2)

        worker2 = StreamWorker(handler=None)
        worker1 = ExecWorker(nodes='localhost', handler=TestH(worker2),
                             command="echo ok")

        # Create pipe stream
        rfd1, wfd1 = os.pipe()
        worker2.set_reader("pipe1", rfd1, closefd=False)
        os.write(wfd1, b"test\n")
        os.close(wfd1)

        # Enable pipe1_msgtree
        task_self().set_default("pipe1_msgtree", True)

        task_self().schedule(worker1)
        task_self().run()

        self.assertEqual(worker1.node_buffer('localhost'), b"ok")
        self.assertEqual(worker1.node_retcode('localhost'), 0)
        self.assertEqual(worker2.read(sname="pipe1"), b"test")
        self.assertEqual(task_self().max_retcode(), 0)
Code example #2
File: Action.py  Project: fihuer/milkcheck
    def perform_action(self, action):
        """Perform an immediate action"""
        assert not action.to_skip(), "Action should be already SKIPPED"

        if not action.parent.simulate:
            self.add_task(action)
        call_back_self().notify(action.parent, EV_STARTED)

        nodes = None
        if action.mode != 'delegate':
            nodes = action.target

        # In dry-run mode, all commands are replaced by a simple ':'
        command = ':'
        if not self.dryrun:
            command = action.command

        if action.mode == 'exec':
            wkr = ExecWorker(nodes=nodes, handler=ActionEventHandler(action),
                             timeout=action.timeout, command=command,
                             remote=action.remote)
            self._master_task.schedule(wkr)
        else:
            self._master_task.shell(command, nodes=nodes,
                                    timeout=action.timeout,
                                    handler=ActionEventHandler(action),
                                    remote=action.remote)
Code example #3
    def execw(self, **kwargs):
        """helper method to spawn and run ExecWorker"""
        worker = ExecWorker(**kwargs)
        task_self().schedule(worker)
        task_self().run()
        return worker
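
A minimal usage sketch for the helper above; the node name, command and assertion are illustrative only and assume the helper lives in a unittest.TestCase mixin, as in the other examples:

    # hypothetical call site: run a local command through the helper
    worker = self.execw(nodes='localhost', command='echo ok', handler=None)
    # the worker has already run, so its results can be checked directly
    self.assertEqual(worker.node_retcode('localhost'), 0)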
Code example #4
File: Tree.py  Project: diorsman/clustershell
    def _launch(self, nodes):
        self.logger.debug("WorkerTree._launch on %s (fanout=%d)", nodes,
                          self.task.info("fanout"))

        # Prepare copy params if source is defined
        destdir = None
        if self.source:
            self.logger.debug("copy self.dest=%s", self.dest)
            # Special processing to determine best arcname and destdir for tar.
            # The only case that we don't support is when source is a file and
            # dest is a dir without a finishing / (in that case we cannot
            # determine remotely whether it is a file or a directory).
            if isfile(self.source):
                # dest is not normalized here
                arcname = basename(self.dest) or basename(normpath(
                    self.source))
                destdir = dirname(self.dest)
            else:
                arcname = basename(normpath(self.source))
                destdir = os.path.normpath(self.dest)
            self.logger.debug("copy arcname=%s destdir=%s", arcname, destdir)

        # And launch stuffs
        next_hops = self._distribute(self.task.info("fanout"), nodes.copy())
        self.logger.debug("next_hops=%s" % [(str(n), str(v))
                                            for n, v in next_hops.items()])
        for gw, targets in next_hops.iteritems():
            if gw == targets:
                self.logger.debug(
                    'task.shell cmd=%s source=%s nodes=%s '
                    'timeout=%s remote=%s', self.command, self.source, nodes,
                    self.timeout, self.remote)
                self._child_count += 1
                self._target_count += len(targets)
                if self.remote:
                    if self.source:
                        self.logger.debug('_launch remote untar (destdir=%s)',
                                          destdir)
                        self.command = self.UNTAR_CMD_FMT % destdir
                        worker = self.task.shell(self.command,
                                                 nodes=targets,
                                                 timeout=self.timeout,
                                                 handler=self.metahandler,
                                                 stderr=self.stderr,
                                                 tree=False)
                    else:
                        worker = self.task.shell(self.command,
                                                 nodes=targets,
                                                 timeout=self.timeout,
                                                 handler=self.metahandler,
                                                 stderr=self.stderr,
                                                 tree=False)
                else:
                    assert self.source is None
                    worker = ExecWorker(nodes=targets,
                                        command=self.command,
                                        handler=self.metahandler,
                                        timeout=self.timeout,
                                        stderr=self.stderr)
                    self.task.schedule(worker)

                self.workers.append(worker)
                self.logger.debug("added child worker %s count=%d", worker,
                                  len(self.workers))
            else:
                self.logger.debug("trying gateway %s to reach %s", gw, targets)
                if self.source:
                    self._copy_remote(self.source, destdir, targets, gw,
                                      self.timeout)
                else:
                    self._execute_remote(self.command, targets, gw,
                                         self.timeout)

        # Copy mode: send tar data after above workers have been initialized
        if self.source:
            try:
                # create temporary tar file with all source files
                tmptar = tempfile.TemporaryFile()
                tar = tarfile.open(fileobj=tmptar, mode='w:')
                tar.add(self.source, arcname=arcname)
                tar.close()
                tmptar.flush()
                # read generated tar file and send to worker
                tmptar.seek(0)
                rbuf = tmptar.read(32768)
                while len(rbuf) > 0:
                    self.write(rbuf)
                    rbuf = tmptar.read(32768)
            except OSError as exc:
                raise WorkerError(exc)
Code example #5
    def testLocalWorkerFanout(self):
        class TestRunCountChecker(EventHandler):
            def __init__(self):
                self.workers = []
                self.max_run_cnt = 0

            def ev_start(self, worker):
                self.workers.append(worker)

            def ev_read(self, worker):
                run_cnt = sum(e.registered for w in self.workers
                              for e in w._engine_clients())
                self.max_run_cnt = max(self.max_run_cnt, run_cnt)

        task = task_self()

        TEST_FANOUT = 3
        task.set_info("fanout", TEST_FANOUT)

        # TEST 1 - default worker fanout
        eh = TestRunCountChecker()
        for i in range(10):
            task.shell("echo foo", handler=eh)
        task.resume()
        # Engine fanout should be enforced
        self.assertTrue(eh.max_run_cnt <= TEST_FANOUT)

        # TEST 1bis - default worker fanout with ExecWorker
        eh = TestRunCountChecker()
        worker = ExecWorker(nodes='foo[0-9]', handler=eh, command='echo bar')
        task.schedule(worker)
        task.resume()
        # Engine fanout should be enforced
        self.assertTrue(eh.max_run_cnt <= TEST_FANOUT)

        # TEST 2 - create n x workers using worker.fanout
        eh = TestRunCountChecker()
        for i in range(10):
            task.shell("echo foo", handler=eh)._fanout = 1
        task.resume()
        # max_run_cnt should reach the total number of workers
        self.assertEqual(eh.max_run_cnt, 10)

        # TEST 2bis - create ExecWorker with multiple clients [larger fanout]
        eh = TestRunCountChecker()
        worker = ExecWorker(nodes='foo[0-9]', handler=eh, command='echo bar')
        worker._fanout = 5
        task.schedule(worker)
        task.resume()
        # max_run_cnt should reach worker._fanout
        self.assertEqual(eh.max_run_cnt, 5)

        # TEST 2ter - create ExecWorker with multiple clients [smaller fanout]
        eh = TestRunCountChecker()
        worker = ExecWorker(nodes='foo[0-9]', handler=eh, command='echo bar')
        worker._fanout = 1
        task.schedule(worker)
        task.resume()
        # max_run_cnt should reach worker._fanout
        self.assertEqual(eh.max_run_cnt, 1)

        # TEST 4 - create workers using unlimited fanout
        eh = TestRunCountChecker()
        for i in range(10):
            w = task.shell("echo foo", handler=eh)
            w._fanout = FANOUT_UNLIMITED
        task.resume()
        # max_run_cnt should reach the total number of workers
        self.assertEqual(eh.max_run_cnt, 10)

        # TEST 4bis - create ExecWorker with unlimited fanout
        eh = TestRunCountChecker()
        worker = ExecWorker(nodes='foo[0-9]', handler=eh, command='echo bar')
        worker._fanout = FANOUT_UNLIMITED
        task.schedule(worker)
        task.resume()
        # max_run_cnt should reach the total number of clients (10)
        self.assertEqual(eh.max_run_cnt, 10)
Code example #6
File: Tree.py  Project: l-x-l/clustershell
    def _launch(self, nodes):
        self.logger.debug("WorkerTree._launch on %s (fanout=%d)", nodes,
                          self.task.info("fanout"))

        # Prepare copy params if source is defined
        destdir = None
        if self.source:
            if self.reverse:
                self.logger.debug("rcopy source=%s, dest=%s", self.source,
                                  self.dest)
                # dest is a directory
                destdir = self.dest
            else:
                self.logger.debug("copy source=%s, dest=%s", self.source,
                                  self.dest)
                # Special processing to determine best arcname and destdir for
                # tar. The only case that we don't support is when source is a
                # file and dest is a dir without a finishing / (in that case we
                # cannot determine remotely whether it is a file or a
                # directory).
                if isfile(self.source):
                    # dest is not normalized here
                    arcname = basename(self.dest) or \
                              basename(normpath(self.source))
                    destdir = dirname(self.dest)
                else:
                    # source is a directory: if dest has a trailing slash
                    # like in /tmp/ then arcname is basename(source)
                    # but if dest is /tmp/newname (without trailing slash) then
                    # arcname becomes newname.
                    if self.dest[-1] == '/':
                        arcname = basename(self.source)
                    else:
                        arcname = basename(self.dest)
                    # dirname does not have the same behavior when a trailing
                    # slash is present, and we want that.
                    destdir = dirname(self.dest)
                self.logger.debug("copy arcname=%s destdir=%s", arcname,
                                  destdir)

        # And launch stuffs
        next_hops = self._distribute(self.task.info("fanout"), nodes.copy())
        self.logger.debug("next_hops=%s"
                          % [(str(n), str(v)) for n, v in next_hops.items()])
        for gw, targets in next_hops.iteritems():
            if gw == targets:
                self.logger.debug('task.shell cmd=%s source=%s nodes=%s '
                                  'timeout=%s remote=%s', self.command,
                                  self.source, nodes, self.timeout, self.remote)
                self._child_count += 1
                self._target_count += len(targets)
                if self.remote:
                    if self.source:
                        # Note: specific case where targets are not in topology
                        # as self.source is never used on remote gateways
                        # so we try a direct copy/rcopy:
                        self.logger.debug('_launch copy r=%s source=%s dest=%s',
                                          self.reverse, self.source, self.dest)
                        worker = self.task.copy(self.source, self.dest, targets,
                                                handler=self.metahandler,
                                                stderr=self.stderr,
                                                timeout=self.timeout,
                                                preserve=self.preserve,
                                                reverse=self.reverse,
                                                tree=False)
                    else:
                        worker = self.task.shell(self.command,
                                                 nodes=targets,
                                                 timeout=self.timeout,
                                                 handler=self.metahandler,
                                                 stderr=self.stderr,
                                                 tree=False)
                else:
                    assert self.source is None
                    worker = ExecWorker(nodes=targets,
                                        command=self.command,
                                        handler=self.metahandler,
                                        timeout=self.timeout,
                                        stderr=self.stderr)
                    self.task.schedule(worker)

                self.workers.append(worker)
                self.logger.debug("added child worker %s count=%d", worker,
                                  len(self.workers))
            else:
                self.logger.debug("trying gateway %s to reach %s", gw, targets)
                if self.source:
                    self._copy_remote(self.source, destdir, targets, gw,
                                      self.timeout, self.reverse)
                else:
                    self._execute_remote(self.command, targets, gw,
                                         self.timeout)

        # Copy mode: send tar data after above workers have been initialized
        if self.source and not self.reverse:
            try:
                # create temporary tar file with all source files
                tmptar = tempfile.TemporaryFile()
                tar = tarfile.open(fileobj=tmptar, mode='w:')
                tar.add(self.source, arcname=arcname)
                tar.close()
                tmptar.flush()
                # read generated tar file
                tmptar.seek(0)
                rbuf = tmptar.read(32768)
                # send tar data to remote targets only
                while len(rbuf) > 0:
                    self._write_remote(rbuf)
                    rbuf = tmptar.read(32768)
            except OSError as exc:
                raise WorkerError(exc)
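
All of the snippets above are fragments taken from larger modules and omit their imports. As a reference, here is a minimal, self-contained sketch of the common ExecWorker pattern they share (module paths are those of ClusterShell 1.7+; the node name and command are placeholders):

    from ClusterShell.Task import task_self
    from ClusterShell.Worker.Exec import ExecWorker

    # build a local worker, hand it to the current task and run it
    task = task_self()
    worker = ExecWorker(nodes='localhost', handler=None, command='echo ok')
    task.schedule(worker)
    task.run()

    # inspect per-node output and return code once the task has completed
    print(worker.node_buffer('localhost'))   # command output, e.g. ok
    print(worker.node_retcode('localhost'))  # 0 on success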