Code Example #1
File: TaskLocalMixin.py - Project: samkos/clustershell
    def test_mixed_worker_retcodes(self):
        """test Task retcode handling with mixed workers"""

        # This test case failed with CS <= 1.7.3
        # Conditions: task.max_retcode() set during runtime (not None)
        # and then a StreamWorker closing, thus calling Task._set_rc(rc=None)
        # To reproduce, we start a StreamWorker on the first read of an ExecWorker.

        class TestH(EventHandler):
            def __init__(self, worker2):
                self.worker2 = worker2

            def ev_read(self, worker):
                worker.task.schedule(self.worker2)

        worker2 = StreamWorker(handler=None)
        worker1 = ExecWorker(nodes='localhost', handler=TestH(worker2),
                             command="echo ok")

        # Create pipe stream
        rfd1, wfd1 = os.pipe()
        worker2.set_reader("pipe1", rfd1, closefd=False)
        os.write(wfd1, b"test\n")
        os.close(wfd1)

        # Enable pipe1_msgtree
        task_self().set_default("pipe1_msgtree", True)

        task_self().schedule(worker1)
        task_self().run()

        self.assertEqual(worker1.node_buffer('localhost'), b"ok")
        self.assertEqual(worker1.node_retcode('localhost'), 0)
        self.assertEqual(worker2.read(sname="pipe1"), b"test")
        self.assertEqual(task_self().max_retcode(), 0)
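The excerpts in this listing omit their imports and assume a running local task. For orientation, here is a minimal standalone sketch (not taken from the test suite) of the StreamWorker pipe-reader pattern these tests build on; the module paths follow the usual ClusterShell layout and should be treated as assumptions:

# Minimal standalone sketch of the StreamWorker pipe-reader pattern used in
# these examples; module paths assume the usual ClusterShell layout.
import os

from ClusterShell.Event import EventHandler
from ClusterShell.Task import task_self
from ClusterShell.Worker.Worker import StreamWorker

class PrintHandler(EventHandler):
    def ev_read(self, worker):
        # current_sname/current_msg identify the stream and the data just read
        print(worker.current_sname, worker.current_msg)

worker = StreamWorker(handler=PrintHandler())

rfd, wfd = os.pipe()
worker.set_reader("pipe1", rfd)   # closefd defaults to True
os.write(wfd, b"hello\n")
os.close(wfd)                     # EOF on the write side lets the worker terminate

task_self().schedule(worker)
task_self().run()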
Code Example #2
    def test_006_worker_abort_on_written(self):
        """test StreamWorker abort on ev_written"""

        # This test creates a writable StreamWorker that will abort after the
        # first write, to check whether ev_written is generated in the right
        # place.

        class TestH(EventHandler):
            def __init__(self, testcase, rfd):
                self.testcase = testcase
                self.rfd = rfd
                self.check_written = 0

            def ev_written(self, worker, node, sname, size):
                self.check_written += 1
                self.testcase.assertEqual(os.read(self.rfd, 1024), b"initial")
                worker.abort()
                worker.abort()  # safe but no effect

        rfd, wfd = os.pipe()

        hdlr = TestH(self, rfd)
        worker = StreamWorker(handler=hdlr)

        worker.set_writer("test", wfd) # closefd=True
        worker.write(b"initial", "test")

        self.run_worker(worker)
        self.assertEqual(hdlr.check_written, 1)
        os.close(rfd)
Code Example #3
    def test_003_io_pipes(self):
        """test StreamWorker bound to pipe readers and writers"""

        # os.write -> pipe1 -> worker -> pipe2 -> os.read

        class TestH(EventHandler):
            def __init__(self, testcase):
                self.testcase = testcase
                self.worker = None
                self.pickup_count = 0
                self.hup_count = 0

            def ev_pickup(self, worker):
                self.pickup_count += 1

            def ev_read(self, worker):
                self.testcase.assertEqual(worker.current_sname, "pipe1")
                worker.write(worker.current_msg, "pipe2")

            def ev_timer(self, timer):
                # call set_write_eof on specific stream after some delay
                worker = self.worker
                self.worker = 'DONE'
                worker.set_write_eof("pipe2")

            def ev_hup(self, worker):
                # ev_hup called at the end (after set_write_eof is called)
                self.hup_count += 1
                self.testcase.assertEqual(self.worker, 'DONE')
                # no rc code should be set
                self.testcase.assertEqual(worker.current_rc, None)

        # create a StreamWorker instance bound to several pipes
        hdlr = TestH(self)
        worker = StreamWorker(handler=hdlr)
        hdlr.worker = worker

        rfd1, wfd1 = os.pipe()
        worker.set_reader("pipe1", rfd1)
        os.write(wfd1, b"Some data\n")
        os.close(wfd1)

        rfd2, wfd2 = os.pipe()
        worker.set_writer("pipe2", wfd2)

        timer1 = task_self().timer(1.0, handler=hdlr)
        self.run_worker(worker)
        self.assertEqual(os.read(rfd2, 1024), b"Some data")
        os.close(rfd2)
        # wfd2 should be closed by CS
        self.assertRaises(OSError, os.close, wfd2)
        # rfd1 should be closed by CS
        self.assertRaises(OSError, os.close, rfd1)
        # check pickup/hup
        self.assertEqual(hdlr.hup_count, 1)
        self.assertEqual(hdlr.pickup_count, 1)
        self.assertTrue(task_self().max_retcode() is None)
Code Example #4
    def test_002_pipe_readers(self):
        """test StreamWorker bound to several pipe readers"""

        streams = {
            "pipe1_reader": "Some data to read from a pipe",
            "stderr": "Error data to read using special keyword stderr",
            "pipe2_reader": "Other data to read from another pipe",
            "pipe3_reader": "Cool data to read from a third pipe"
        }

        class TestH(EventHandler):
            def __init__(self, testcase):
                self.snames = set()
                self.testcase = testcase

            def ev_error(self, worker):
                # test that ev_error is called in case of 'stderr' stream name
                self.testcase.assertEqual(worker.current_sname, "stderr")
                # pass the worker along so recv_msg() can inspect it
                self.recv_msg(worker, worker.current_errmsg)

            def ev_read(self, worker):
                self.recv_msg(worker, worker.current_msg)

            def recv_msg(self, worker, msg):
                self.testcase.assertTrue(len(self.snames) < len(streams))
                self.testcase.assertEqual(streams[worker.current_sname], msg)
                self.snames.add(worker.current_sname)
                if len(self.snames) == len(streams):
                    # before finishing, try to add another pipe at
                    # runtime: this is NOT allowed
                    rfd, wfd = os.pipe()
                    self.testcase.assertRaises(WorkerError, worker.set_reader,
                                               "pipe4_reader", rfd)
                    self.testcase.assertRaises(WorkerError, worker.set_writer,
                                               "pipe4_writer", wfd)
                    os.close(rfd)
                    os.close(wfd)

        # create a StreamWorker instance bound to several pipes
        hdlr = TestH(self)
        worker = StreamWorker(handler=hdlr)

        for sname in streams.keys():
            rfd, wfd = os.pipe()
            worker.set_reader(sname, rfd)
            os.write(wfd, streams[sname])
            os.close(wfd)

        self.run_worker(worker)

        # check that all ev_read have been received
        self.assertEqual(
            set(("pipe1_reader", "pipe2_reader", "pipe3_reader", "stderr")),
            hdlr.snames)
Code Example #5
    def test_002_pipe_readers(self):
        """test StreamWorker bound to several pipe readers"""

        streams = { "pipe1_reader": b"Some data to read from a pipe",
                    "stderr": b"Error data to read using special keyword stderr",
                    "pipe2_reader": b"Other data to read from another pipe",
                    "pipe3_reader": b"Cool data to read from a third pipe" }

        class TestH(EventHandler):
            def __init__(self, testcase):
                self.snames = set()
                self.testcase = testcase

            def ev_error(self, worker):
                # test that ev_error is called in case of 'stderr' stream name
                self.testcase.assertEqual(worker.current_sname, "stderr")
                # pass the worker along so recv_msg() can inspect it
                self.recv_msg(worker, worker.current_errmsg)

            def ev_read(self, worker):
                self.recv_msg(worker, worker.current_msg)

            def recv_msg(self, worker, msg):
                self.testcase.assertTrue(len(self.snames) < len(streams))
                self.testcase.assertEqual(streams[worker.current_sname], msg)
                self.snames.add(worker.current_sname)
                if len(self.snames) == len(streams):
                    # before finishing, try to add another pipe at
                    # runtime: this is NOT allowed
                    rfd, wfd = os.pipe()
                    self.testcase.assertRaises(WorkerError,
                        worker.set_reader, "pipe4_reader", rfd)
                    self.testcase.assertRaises(WorkerError,
                        worker.set_writer, "pipe4_writer", wfd)
                    os.close(rfd)
                    os.close(wfd)

        # create a StreamWorker instance bound to several pipes
        hdlr = TestH(self)
        worker = StreamWorker(handler=hdlr)

        for sname in streams.keys():
            rfd, wfd = os.pipe()
            worker.set_reader(sname, rfd)
            os.write(wfd, streams[sname])
            os.close(wfd)

        self.run_worker(worker)

        # check that all ev_read have been received
        self.assertEqual(set(("pipe1_reader", "pipe2_reader", "pipe3_reader",
                              "stderr")), hdlr.snames)
Code Example #6
    def test_007_worker_abort_on_written_eof(self):
        """test StreamWorker abort on ev_written (with EOF)"""

        # This test is similar to previous test test_006 but does
        # write() + set_write_eof().

        class TestH(EventHandler):
            def __init__(self, testcase, rfd):
                self.testcase = testcase
                self.rfd = rfd
                self.check_written = 0

            def ev_written(self, worker, node, sname, size):
                self.check_written += 1
                self.testcase.assertEqual(os.read(self.rfd, 1024), b"initial")
                worker.abort()

        rfd, wfd = os.pipe()

        hdlr = TestH(self, rfd)
        worker = StreamWorker(handler=hdlr)

        worker.set_writer("test", wfd) # closefd=True
        worker.write(b"initial", "test")
        worker.set_write_eof()

        self.run_worker(worker)
        self.assertEqual(hdlr.check_written, 1)
        os.close(rfd)
Code Example #7
class Gateway(object):
    """Gateway special test class.

    Initialize a GatewayChannel through a R/W StreamWorker like a real
    remote ClusterShell Gateway but:
        - using pipes to communicate,
        - running on a dedicated task/thread.
    """

    def __init__(self):
        """init Gateway bound objects"""
        self.task = Task()
        self.channel = GatewayChannel(self.task)
        self.worker = StreamWorker(handler=self.channel)
        # create communication pipes
        self.pipe_stdin = os.pipe()
        self.pipe_stdout = os.pipe()
        # avoid nonblocking flag as we want recv/read() to block
        self.worker.set_reader(self.channel.SNAME_READER,
                               self.pipe_stdin[0])
        self.worker.set_writer(self.channel.SNAME_WRITER,
                               self.pipe_stdout[1], retain=False)
        self.task.schedule(self.worker)
        self.task.resume()

    def send(self, msg):
        """send msg (bytes) to pseudo stdin"""
        os.write(self.pipe_stdin[1], msg + b'\n')

    def send_str(self, msgstr):
        """send msg (string) to pseudo stdin"""
        self.send(msgstr.encode())

    def recv(self):
        """recv buf from pseudo stdout (blocking call)"""
        return os.read(self.pipe_stdout[0], 4096)

    def wait(self):
        """wait for task/thread termination"""
        # can be blocked indefinitely if StreamWorker doesn't complete
        self.task.join()

    def close(self):
        """close parent fds"""
        os.close(self.pipe_stdout[0])
        os.close(self.pipe_stdin[1])

    def destroy(self):
        """abort task/thread"""
        self.task.abort(kill=True)
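A hypothetical driver for this helper (not part of the original tests) would use it roughly as follows; the payload is only a placeholder, since real messages follow ClusterShell's channel protocol:

# Hypothetical usage sketch for the Gateway test helper above.
gw = Gateway()
try:
    gw.send_str("...")   # placeholder payload; actual content is protocol-specific
    reply = gw.recv()    # blocking read from the gateway's pseudo stdout
    print(reply)
finally:
    gw.close()           # close the parent-side pipe fds
    gw.destroy()         # abort the dedicated task/thread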
Code Example #8
File: TreeGatewayTest.py - Project: ypsah/clustershell
    def __init__(self, gwhost):
        """init Gateway bound objects"""
        self.task = Task()
        self.gwhost = gwhost
        self.channel = GatewayChannel(self.task, gwhost)
        self.worker = StreamWorker(handler=self.channel)
        # create communication pipes
        self.pipe_stdin = os.pipe()
        self.pipe_stdout = os.pipe()
        # avoid nonblocking flag as we want recv/read() to block
        self.worker.set_reader('r-stdin', self.pipe_stdin[0])
        self.worker.set_writer('w-stdout', self.pipe_stdout[1], retain=False)
        self.task.schedule(self.worker)
        self.task.resume()
Code Example #9
    def test_009_worker_abort_on_close(self):
        """test StreamWorker abort() on closing worker"""
        class TestH(EventHandler):
            def __init__(self, testcase, rfd):
                self.testcase = testcase
                self.rfd = rfd
                self.check_close = 0

            def ev_close(self, worker, timedout):
                self.check_close += 1
                self.testcase.assertFalse(timedout)
                os.close(self.rfd)
                worker.abort()
                worker.abort()  # safe but no effect

        rfd, wfd = os.pipe()

        hdlr = TestH(self, rfd)
        worker = StreamWorker(handler=hdlr)

        worker.set_writer("test", wfd)  # closefd=True
        worker.write(b"initial", "test")
        worker.set_write_eof()

        self.run_worker(worker)
        self.assertEqual(hdlr.check_close, 1)
Code Example #10
    def test_005_timeout_events(self):
        """test StreamWorker with timeout set (event based)"""
        class TestH(EventHandler):
            def __init__(self, testcase):
                self.testcase = testcase
                self.ev_pickup_called = False
                self.ev_read_called = False
                self.ev_hup_called = False
                self.ev_timeout_called = False

            def ev_pickup(self, worker):
                self.ev_pickup_called = True

            def ev_read(self, worker):
                self.ev_read_called = True
                self.testcase.assertEqual(worker.current_sname, "pipe1")
                self.testcase.assertEqual(worker.current_msg, b"Some data")

            def ev_hup(self, worker):
                # ev_hup is called but no rc code should be set
                self.ev_hup_called = True
                self.testcase.assertEqual(worker.current_rc, None)

            def ev_timeout(self, worker):
                self.ev_timeout_called = True

        hdlr = TestH(self)
        worker = StreamWorker(handler=hdlr, timeout=0.5)

        # Create pipe stream with closefd set (default)
        rfd1, wfd1 = os.pipe()
        worker.set_reader("pipe1", rfd1)
        # Write some chars without line break (worst case)
        os.write(wfd1, b"Some data")
        # TEST: Do not close wfd1 to simulate open stream

        self.run_worker(worker)
        self.assertTrue(hdlr.ev_timeout_called)
        self.assertTrue(hdlr.ev_read_called)
        self.assertTrue(hdlr.ev_pickup_called)
        self.assertTrue(hdlr.ev_hup_called)

        # rfd1 should be already closed by CS
        self.assertRaises(OSError, os.close, rfd1)
        os.close(wfd1)
Code Example #11
    def test_007_worker_abort_on_written_eof(self):
        """test StreamWorker abort on ev_written (with EOF)"""

        # This test is similar to previous test test_006 but does
        # write() + set_write_eof().

        class TestH(EventHandler):
            def __init__(self, testcase, rfd):
                self.testcase = testcase
                self.rfd = rfd
                self.check_written = 0

            def ev_written(self, worker, node, sname, size):
                self.check_written += 1
                self.testcase.assertEqual(os.read(self.rfd, 1024), b"initial")
                worker.abort()
                worker.abort()  # safe but no effect

        rfd, wfd = os.pipe()

        hdlr = TestH(self, rfd)
        worker = StreamWorker(handler=hdlr)

        worker.set_writer("test", wfd) # closefd=True
        worker.write(b"initial", "test")
        worker.set_write_eof()

        self.run_worker(worker)
        self.assertEqual(hdlr.check_written, 1)
        os.close(rfd)
Code Example #12
File: Gateway.py - Project: dupgit/clustershell
def gateway_main():
    """ClusterShell gateway entry point"""
    host = _getshorthostname()
    # configure root logger
    logdir = os.path.expanduser(
        os.environ.get('CLUSTERSHELL_GW_LOG_DIR', '/tmp'))
    loglevel = os.environ.get('CLUSTERSHELL_GW_LOG_LEVEL', 'INFO')
    try:
        log_level = getattr(logging, loglevel.upper(), logging.INFO)
        log_fmt = '%(asctime)s %(name)s %(levelname)s %(message)s'
        logging.basicConfig(level=log_level,
                            format=log_fmt,
                            filename=os.path.join(logdir, "%s.gw.log" % host))
    except (IOError, OSError):
        pass  # logging failure is not fatal

    logger = logging.getLogger(__name__)
    sys.excepthook = gateway_excepthook

    logger.debug('Starting gateway on %s', host)
    logger.debug("environ=%s", os.environ)

    set_nonblock_flag(sys.stdin.fileno())
    set_nonblock_flag(sys.stdout.fileno())
    set_nonblock_flag(sys.stderr.fileno())

    task = task_self()

    # Disable MsgTree buffering, it is enabled later when needed
    task.set_default("stdout_msgtree", False)
    task.set_default("stderr_msgtree", False)

    if sys.stdin.isatty():
        logger.critical('Gateway failure: sys.stdin.isatty() is True')
        sys.exit(1)

    gateway = GatewayChannel(task)
    worker = StreamWorker(handler=gateway)
    # Define worker._fanout to not rely on the engine's fanout, and use
    # the special value FANOUT_UNLIMITED to always allow registration
    worker._fanout = FANOUT_UNLIMITED
    worker.set_reader(gateway.SNAME_READER, sys.stdin)
    worker.set_writer(gateway.SNAME_WRITER, sys.stdout, retain=False)
    # must stay disabled for now (see #274)
    #worker.set_writer(gateway.SNAME_ERROR, sys.stderr, retain=False)
    task.schedule(worker)
    logger.debug('Starting task')
    try:
        task.resume()
        logger.debug('Task performed')
    except EngineAbortException as exc:
        logger.debug('EngineAbortException')
    except IOError as exc:
        logger.debug('Broken pipe (%s)', exc)
        raise
    except Exception as exc:
        logger.exception('Gateway failure: %s', exc)
    logger.debug('-------- The End --------')
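set_nonblock_flag() is imported from elsewhere in ClusterShell and is not shown in this excerpt. As an assumption about what such a helper typically does (not the project's actual implementation), it could look like this:

# Assumed sketch of a set_nonblock_flag() helper; ClusterShell's real
# implementation lives in another module and may differ.
import fcntl
import os

def set_nonblock_flag(fd):
    """set the O_NONBLOCK flag on file descriptor fd"""
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)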
Code Example #13
    def test_006_worker_abort_on_written(self):
        """test StreamWorker abort on ev_written"""

        # This test creates a writable StreamWorker that will abort after the
        # first write, to check whether ev_written is generated in the right
        # place.

        class TestH(EventHandler):
            def __init__(self, testcase, rfd):
                self.testcase = testcase
                self.rfd = rfd
                self.check_written = 0

            def ev_written(self, worker, node, sname, size):
                self.check_written += 1
                self.testcase.assertEqual(os.read(self.rfd, 1024), b"initial")
                worker.abort()

        rfd, wfd = os.pipe()

        hdlr = TestH(self, rfd)
        worker = StreamWorker(handler=hdlr)

        worker.set_writer("test", wfd) # closefd=True
        worker.write(b"initial", "test")

        self.run_worker(worker)
        self.assertEqual(hdlr.check_written, 1)
        os.close(rfd)
Code Example #14
    def test_004_timeout_on_open_stream(self):
        """test StreamWorker with timeout set on open stream"""
        # Create worker set with timeout
        worker = StreamWorker(handler=None, timeout=0.5)

        # Create pipe stream
        rfd1, wfd1 = os.pipe()
        worker.set_reader("pipe1", rfd1, closefd=False)
        # Write some chars without line break (worst case)
        os.write(wfd1, "Some data")
        # TEST: Do not close wfd1 to simulate open stream

        # Need to enable pipe1_msgtree
        task_self().set_default("pipe1_msgtree", True)
        self.run_worker(worker)

        # Timeout occurred - read buffer should have been flushed
        self.assertEqual(worker.read(sname="pipe1"), "Some data")

        # closefd was set, we should be able to close pipe fds
        os.close(rfd1)
        os.close(wfd1)
Code Example #15
    def test_004_timeout_on_open_stream(self):
        """test StreamWorker with timeout set on open stream"""
        # Create worker set with timeout
        worker = StreamWorker(handler=None, timeout=0.5)

        # Create pipe stream
        rfd1, wfd1 = os.pipe()
        worker.set_reader("pipe1", rfd1, closefd=False)
        # Write some chars without line break (worst case)
        os.write(wfd1, b"Some data")
        # TEST: Do not close wfd1 to simulate open stream

        # Need to enable pipe1_msgtree
        task_self().set_default("pipe1_msgtree", True)
        self.run_worker(worker)

        # Timeout occurred - read buffer should have been flushed
        self.assertEqual(worker.read(sname="pipe1"), b"Some data")

        # closefd was set, we should be able to close pipe fds
        os.close(rfd1)
        os.close(wfd1)
Code Example #16
File: Gateway.py - Project: cea-hpc/clustershell
def gateway_main():
    """ClusterShell gateway entry point"""
    host = _getshorthostname()
    # configure root logger
    logdir = os.path.expanduser(os.environ.get('CLUSTERSHELL_GW_LOG_DIR',
                                               '/tmp'))
    loglevel = os.environ.get('CLUSTERSHELL_GW_LOG_LEVEL', 'INFO')
    try:
        log_level = getattr(logging, loglevel.upper(), logging.INFO)
        log_fmt = '%(asctime)s %(name)s %(levelname)s %(message)s'
        logging.basicConfig(level=log_level, format=log_fmt,
                            filename=os.path.join(logdir, "%s.gw.log" % host))
    except (IOError, OSError):
        pass  # logging failure is not fatal

    logger = logging.getLogger(__name__)
    sys.excepthook = gateway_excepthook

    logger.debug('Starting gateway on %s', host)
    logger.debug("environ=%s", os.environ)


    set_nonblock_flag(sys.stdin.fileno())
    set_nonblock_flag(sys.stdout.fileno())
    set_nonblock_flag(sys.stderr.fileno())

    task = task_self()

    # Disable MsgTree buffering, it is enabled later when needed
    task.set_default("stdout_msgtree", False)
    task.set_default("stderr_msgtree", False)

    if sys.stdin.isatty():
        logger.critical('Gateway failure: sys.stdin.isatty() is True')
        sys.exit(1)

    gateway = GatewayChannel(task)
    worker = StreamWorker(handler=gateway)
    # Define worker._fanout to not rely on the engine's fanout, and use
    # the special value FANOUT_UNLIMITED to always allow registration
    worker._fanout = FANOUT_UNLIMITED
    worker.set_reader(gateway.SNAME_READER, sys.stdin)
    worker.set_writer(gateway.SNAME_WRITER, sys.stdout, retain=False)
    # must stay disabled for now (see #274)
    #worker.set_writer(gateway.SNAME_ERROR, sys.stderr, retain=False)
    task.schedule(worker)
    logger.debug('Starting task')
    try:
        task.resume()
        logger.debug('Task performed')
    except EngineAbortException as exc:
        logger.debug('EngineAbortException')
    except IOError as exc:
        logger.debug('Broken pipe (%s)', exc)
        raise
    except Exception as exc:
        logger.exception('Gateway failure: %s', exc)
    logger.debug('-------- The End --------')
Code Example #17
    def test_008_broken_pipe_on_write(self):
        """test StreamWorker with broken pipe on write()"""

        # This test creates a writable StreamWorker that will close the read
        # side of the pipe just after the first write to generate a broken
        # pipe error.

        class TestH(EventHandler):
            def __init__(self, testcase, rfd):
                self.testcase = testcase
                self.rfd = rfd
                self.check_hup = 0
                self.check_written = 0

            def ev_hup(self, worker):
                self.check_hup += 1

            def ev_written(self, worker, node, sname, size):
                self.check_written += 1
                self.testcase.assertEqual(os.read(self.rfd, 1024), b"initial")
                # close the reader, which will stop the StreamWorker
                os.close(self.rfd)
                # The following write call used to raise broken pipe before
                # version 1.7.2.
                worker.write(b"final")

        rfd, wfd = os.pipe()

        hdlr = TestH(self, rfd)
        worker = StreamWorker(handler=hdlr)

        worker.set_writer("test", wfd) # closefd=True
        worker.write(b"initial", "test")

        self.run_worker(worker)
        self.assertEqual(hdlr.check_hup, 1)
        self.assertEqual(hdlr.check_written, 1)
Code Example #18
    def test_001_empty(self):
        """test empty StreamWorker"""
        # that makes no sense but well...
        # handler=None is supported by base Worker class
        self.run_worker(StreamWorker(handler=None))
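run_worker() comes from the tests' shared base class and is not included in this excerpt. Judging from how the other examples drive the local task, a plausible minimal implementation (an assumption, not the project's actual helper) would be:

    def run_worker(self, worker):
        # assumed sketch: schedule the worker on the local task and run it to completion
        task_self().schedule(worker)
        task_self().run()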