def test_006_worker_abort_on_written(self):
    """test StreamWorker abort on ev_written"""
    # This test creates a writable StreamWorker that will abort after the
    # first write, to check whether ev_written is generated in the right
    # place.
    class TestH(EventHandler):
        def __init__(self, testcase, rfd):
            self.testcase = testcase
            self.rfd = rfd
            self.check_written = 0

        def ev_written(self, worker, node, sname, size):
            self.check_written += 1
            self.testcase.assertEqual(os.read(self.rfd, 1024), b"initial")
            worker.abort()
            worker.abort()  # safe but no effect

    rfd, wfd = os.pipe()
    hdlr = TestH(self, rfd)
    worker = StreamWorker(handler=hdlr)
    worker.set_writer("test", wfd)  # closefd=True
    worker.write(b"initial", "test")
    self.run_worker(worker)
    self.assertEqual(hdlr.check_written, 1)
    os.close(rfd)
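# Note: the test methods in this section call self.run_worker(), a helper on
# the test case that is not shown here. The following is only a hedged sketch
# of what such a helper could do, assuming it merely schedules the worker on
# the current task and resumes it; the real helper may differ (for example it
# may also handle timeouts or check task state). It reuses the same imports
# (task_self, StreamWorker) as the surrounding tests.

def run_worker_sketch(worker, timeout=None):
    """illustrative stand-in for the test case's run_worker() helper"""
    task = task_self()
    task.schedule(worker)    # register the StreamWorker with the task
    task.resume(timeout)     # run the engine until the worker completes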
def test_009_worker_abort_on_close(self):
    """test StreamWorker abort() on closing worker"""
    class TestH(EventHandler):
        def __init__(self, testcase, rfd):
            self.testcase = testcase
            self.rfd = rfd
            self.check_close = 0

        def ev_close(self, worker, timedout):
            self.check_close += 1
            self.testcase.assertFalse(timedout)
            os.close(self.rfd)
            worker.abort()
            worker.abort()  # safe but no effect

    rfd, wfd = os.pipe()
    hdlr = TestH(self, rfd)
    worker = StreamWorker(handler=hdlr)
    worker.set_writer("test", wfd)  # closefd=True
    worker.write(b"initial", "test")
    worker.set_write_eof()
    self.run_worker(worker)
    self.assertEqual(hdlr.check_close, 1)
def test_007_worker_abort_on_written_eof(self):
    """test StreamWorker abort on ev_written (with EOF)"""
    # This test is similar to previous test test_006 but does
    # write() + set_write_eof().
    class TestH(EventHandler):
        def __init__(self, testcase, rfd):
            self.testcase = testcase
            self.rfd = rfd
            self.check_written = 0

        def ev_written(self, worker, node, sname, size):
            self.check_written += 1
            self.testcase.assertEqual(os.read(self.rfd, 1024), b"initial")
            worker.abort()
            worker.abort()  # safe but no effect

    rfd, wfd = os.pipe()
    hdlr = TestH(self, rfd)
    worker = StreamWorker(handler=hdlr)
    worker.set_writer("test", wfd)  # closefd=True
    worker.write(b"initial", "test")
    worker.set_write_eof()
    self.run_worker(worker)
    self.assertEqual(hdlr.check_written, 1)
    os.close(rfd)
def gateway_main():
    """ClusterShell gateway entry point"""
    host = _getshorthostname()
    # configure root logger
    logdir = os.path.expanduser(os.environ.get('CLUSTERSHELL_GW_LOG_DIR',
                                               '/tmp'))
    loglevel = os.environ.get('CLUSTERSHELL_GW_LOG_LEVEL', 'INFO')
    try:
        log_level = getattr(logging, loglevel.upper(), logging.INFO)
        log_fmt = '%(asctime)s %(name)s %(levelname)s %(message)s'
        logging.basicConfig(level=log_level, format=log_fmt,
                            filename=os.path.join(logdir,
                                                  "%s.gw.log" % host))
    except (IOError, OSError):
        pass  # logging failure is not fatal

    logger = logging.getLogger(__name__)
    sys.excepthook = gateway_excepthook

    logger.debug('Starting gateway on %s', host)
    logger.debug("environ=%s", os.environ)

    set_nonblock_flag(sys.stdin.fileno())
    set_nonblock_flag(sys.stdout.fileno())
    set_nonblock_flag(sys.stderr.fileno())

    task = task_self()

    # Disable MsgTree buffering, it is enabled later when needed
    task.set_default("stdout_msgtree", False)
    task.set_default("stderr_msgtree", False)

    if sys.stdin.isatty():
        logger.critical('Gateway failure: sys.stdin.isatty() is True')
        sys.exit(1)

    gateway = GatewayChannel(task)
    worker = StreamWorker(handler=gateway)
    # Define worker._fanout to not rely on the engine's fanout, and use
    # the special value FANOUT_UNLIMITED to always allow registration
    worker._fanout = FANOUT_UNLIMITED
    worker.set_reader(gateway.SNAME_READER, sys.stdin)
    worker.set_writer(gateway.SNAME_WRITER, sys.stdout, retain=False)
    # must stay disabled for now (see #274)
    #worker.set_writer(gateway.SNAME_ERROR, sys.stderr, retain=False)
    task.schedule(worker)
    logger.debug('Starting task')
    try:
        task.resume()
        logger.debug('Task performed')
    except EngineAbortException as exc:
        logger.debug('EngineAbortException')
    except IOError as exc:
        logger.debug('Broken pipe (%s)', exc)
        raise
    except Exception as exc:
        logger.exception('Gateway failure: %s', exc)
    logger.debug('-------- The End --------')
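# Hedged note: when this module is executed as a script on a remote gateway
# node, a conventional entry-point guard would simply call gateway_main().
# This guard is an assumption about the surrounding module, shown here only
# for completeness.

if __name__ == '__main__':
    gateway_main()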
def test_003_io_pipes(self):
    """test StreamWorker bound to pipe readers and writers"""
    # os.write -> pipe1 -> worker -> pipe2 -> os.read
    class TestH(EventHandler):
        def __init__(self, testcase):
            self.testcase = testcase
            self.worker = None
            self.pickup_count = 0
            self.hup_count = 0

        def ev_pickup(self, worker):
            self.pickup_count += 1

        def ev_read(self, worker):
            self.testcase.assertEqual(worker.current_sname, "pipe1")
            worker.write(worker.current_msg, "pipe2")

        def ev_timer(self, timer):
            # call set_write_eof on specific stream after some delay
            worker = self.worker
            self.worker = 'DONE'
            worker.set_write_eof("pipe2")

        def ev_hup(self, worker):
            # ev_hup called at the end (after set_write_eof is called)
            self.hup_count += 1
            self.testcase.assertEqual(self.worker, 'DONE')
            # no rc code should be set
            self.testcase.assertEqual(worker.current_rc, None)

    # create a StreamWorker instance bound to several pipes
    hdlr = TestH(self)
    worker = StreamWorker(handler=hdlr)
    hdlr.worker = worker

    rfd1, wfd1 = os.pipe()
    worker.set_reader("pipe1", rfd1)
    os.write(wfd1, b"Some data\n")
    os.close(wfd1)

    rfd2, wfd2 = os.pipe()
    worker.set_writer("pipe2", wfd2)

    timer1 = task_self().timer(1.0, handler=hdlr)

    self.run_worker(worker)

    self.assertEqual(os.read(rfd2, 1024), b"Some data")
    os.close(rfd2)
    # wfd2 should be closed by CS
    self.assertRaises(OSError, os.close, wfd2)
    # rfd1 should be closed by CS
    self.assertRaises(OSError, os.close, rfd1)
    # check pickup/hup
    self.assertEqual(hdlr.hup_count, 1)
    self.assertEqual(hdlr.pickup_count, 1)
    self.assertTrue(task_self().max_retcode() is None)
class Gateway(object):
    """Gateway special test class.

    Initialize a GatewayChannel through a R/W StreamWorker like a real
    remote ClusterShell Gateway but:
        - using pipes to communicate,
        - running on a dedicated task/thread.
    """

    def __init__(self):
        """init Gateway bound objects"""
        self.task = Task()
        self.channel = GatewayChannel(self.task)
        self.worker = StreamWorker(handler=self.channel)
        # create communication pipes
        self.pipe_stdin = os.pipe()
        self.pipe_stdout = os.pipe()
        # avoid nonblocking flag as we want recv/read() to block
        self.worker.set_reader(self.channel.SNAME_READER, self.pipe_stdin[0])
        self.worker.set_writer(self.channel.SNAME_WRITER,
                               self.pipe_stdout[1], retain=False)
        self.task.schedule(self.worker)
        self.task.resume()

    def send(self, msg):
        """send msg (bytes) to pseudo stdin"""
        os.write(self.pipe_stdin[1], msg + b'\n')

    def send_str(self, msgstr):
        """send msg (string) to pseudo stdin"""
        self.send(msgstr.encode())

    def recv(self):
        """recv buf from pseudo stdout (blocking call)"""
        return os.read(self.pipe_stdout[0], 4096)

    def wait(self):
        """wait for task/thread termination"""
        # can be blocked indefinitely if StreamWorker doesn't complete
        self.task.join()

    def close(self):
        """close parent fds"""
        os.close(self.pipe_stdout[0])
        os.close(self.pipe_stdin[1])

    def destroy(self):
        """abort task/thread"""
        self.task.abort(kill=True)
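# Illustrative usage of the Gateway test helper above (a hedged sketch, not an
# actual test from the suite): push one line into the pseudo stdin, read back
# whatever the GatewayChannel wrote to the pseudo stdout, then tear down. Only
# methods defined on the class above are used; a real exchange would of course
# have to follow the gateway channel protocol.

def gateway_roundtrip_sketch(payload_str):
    """send one line to a Gateway instance and return the raw reply bytes"""
    gw = Gateway()
    try:
        gw.send_str(payload_str)   # written to pipe_stdin, newline-terminated
        reply = gw.recv()          # blocking read from pipe_stdout
    finally:
        gw.close()                 # close parent-side fds
        gw.destroy()               # abort the dedicated task/thread
    return reply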
def test_008_broken_pipe_on_write(self):
    """test StreamWorker with broken pipe on write()"""
    # This test creates a writable StreamWorker that will close the read
    # side of the pipe just after the first write to generate a broken
    # pipe error.
    class TestH(EventHandler):
        def __init__(self, testcase, rfd):
            self.testcase = testcase
            self.rfd = rfd
            self.check_hup = 0
            self.check_written = 0

        def ev_hup(self, worker):
            self.check_hup += 1

        def ev_written(self, worker, node, sname, size):
            self.check_written += 1
            self.testcase.assertEqual(os.read(self.rfd, 1024), b"initial")
            # close reader, that will stop the StreamWorker
            os.close(self.rfd)
            # The following write call used to raise broken pipe before
            # version 1.7.2.
            worker.write(b"final")

    rfd, wfd = os.pipe()
    hdlr = TestH(self, rfd)
    worker = StreamWorker(handler=hdlr)
    worker.set_writer("test", wfd)  # closefd=True
    worker.write(b"initial", "test")
    self.run_worker(worker)
    self.assertEqual(hdlr.check_hup, 1)
    self.assertEqual(hdlr.check_written, 1)
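# For reference, the raw OS behavior that test_008 guards against: writing to
# a pipe whose read end has been closed fails with EPIPE. A minimal standalone
# sketch, independent of ClusterShell (CPython ignores SIGPIPE by default, so
# os.write() raises BrokenPipeError instead of killing the process):

def broken_pipe_sketch():
    """show that os.write() fails once the pipe's read end is closed"""
    rfd, wfd = os.pipe()
    os.close(rfd)                 # close the read side first
    try:
        os.write(wfd, b"final")   # raises BrokenPipeError (errno EPIPE)
    except BrokenPipeError:
        pass                      # expected: no reader left on the pipe
    finally:
        os.close(wfd)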