def test_getrsyncdirs(self, testdir):
    """--rsyncdir roots are needed only for specs with a remote chdir."""
    config = testdir.parseconfigure('--rsyncdir=' + str(testdir.tmpdir))
    # a plain popen spec shares the filesystem: nothing to rsync
    manager = NodeManager(config, specs=[execnet.XSpec("popen")])
    assert not manager._getrsyncdirs()
    # with an explicit chdir the configured roots must be collected
    manager = NodeManager(config, specs=[execnet.XSpec("popen//chdir=qwe")])
    assert manager.roots
    assert testdir.tmpdir in manager.roots
def test_getxspecs(self, testdir):
    """Each --tx option turns into one execnet XSpec."""
    config = testdir.parseconfigure("--tx=popen", "--tx", "ssh=xyz")
    specs = NodeManager(config)._getxspecs()
    assert len(specs) == 2
    print(specs)
    first, second = specs
    assert first.popen
    assert second.ssh == "xyz"
def pytest_sessionstart(self, session):
    """Creates and starts the nodes.

    The nodes are setup to put their events onto self.queue.  As
    soon as nodes start they will emit the slave_slaveready event.
    """
    manager = NodeManager(self.config)
    self.nodemanager = manager
    self._active_nodes.update(manager.setup_nodes(putevent=self.queue.put))
    self._session = session
def test_optimise_popen(self, testdir, mysetup, slavecontroller):
    """Popen-only specs share the filesystem, so no chdir is used."""
    source = mysetup.source
    source.join("conftest.py").write("rsyncdirs = ['a']")
    source.ensure('a', dir=1)
    config = testdir.parseconfig(source)
    nodemanager = NodeManager(config, ["popen"] * 3)
    nodemanager.setup_nodes(None)  # calls .rsync_roots()
    for gwspec in nodemanager.specs:
        assert gwspec._samefilesystem()
        assert not gwspec.chdir
def test_rsync_same_popen_twice(self, config, mysetup, hookrecorder):
    """Two identical popen targets result in a single rsync transfer."""
    source, dest = mysetup.source, mysetup.dest
    manager = NodeManager(config, ["popen//chdir=%s" % dest] * 2)
    manager.makegateways()
    source.ensure("dir1", "dir2", "hello")
    manager.rsync(source)
    call = hookrecorder.popcall("pytest_xdist_rsyncstart")
    assert call.source == source
    # only one gateway actually receives the transfer
    assert len(call.gateways) == 1
    assert call.gateways[0] in manager.group
    call = hookrecorder.popcall("pytest_xdist_rsyncfinish")
def test_optimise_popen(self, testdir, mysetup):
    """Popen-only specs share the filesystem, so no chdir is used."""
    source, dest = mysetup.source, mysetup.dest
    source.join("conftest.py").write("rsyncdirs = ['a']")
    source.ensure('a', dir=1)
    nodemanager = NodeManager(testdir.parseconfig(source), ["popen"] * 3)
    nodemanager.makegateways()
    nodemanager.rsync_roots()
    for gwspec in nodemanager.specs:
        assert gwspec._samefilesystem()
        assert not gwspec.chdir
def test_rsync_roots_no_roots(self, testdir, mysetup):
    """Without rsyncdirs the whole topdir is transferred under chdir."""
    mysetup.source.ensure("dir1", "file1").write("hello")
    config = testdir.parseconfig(mysetup.source)
    nodemanager = NodeManager(config, ["popen//chdir=%s" % mysetup.dest])
    # assert nodemanager.config.topdir == source == config.topdir
    nodemanager.makegateways()
    nodemanager.rsync_roots()
    cwd, = nodemanager.gwmanager.multi_exec(
        "import os ; channel.send(os.getcwd())").receive_each()
    remote_curdir = py.path.local(cwd)
    py.builtin.print_("remote curdir", remote_curdir)
    assert remote_curdir == mysetup.dest.join(config.topdir.basename)
    assert remote_curdir.join("dir1").check()
    assert remote_curdir.join("dir1", "file1").check()
def test_popen_makegateway_events(self, config, hookrecorder, _pytest):
    """makegateways fires setupnodes once and newgateway per gateway."""
    manager = NodeManager(config, ["popen"] * 2)
    manager.makegateways()
    setup_call = hookrecorder.popcall("pytest_xdist_setupnodes")
    assert len(setup_call.specs) == 2
    first = hookrecorder.popcall("pytest_xdist_newgateway")
    assert first.gateway.spec == execnet.XSpec("popen")
    assert first.gateway.id == "gw0"
    second = hookrecorder.popcall("pytest_xdist_newgateway")
    assert second.gateway.id == "gw1"
    assert len(manager.group) == 2
    manager.teardown_nodes()
    assert not len(manager.group)
def test_popens_rsync(self, config, mysetup, slavecontroller):
    """rsync to same-filesystem popen gateways produces no notifications."""
    source = mysetup.source
    manager = NodeManager(config, ["popen"] * 2)
    manager.setup_nodes(None)
    assert len(manager.group) == 2
    for gw in manager.group:
        # record remote_exec calls instead of actually executing remotely
        class pseudoexec:
            args = []

            def __init__(self, *args):
                self.args.extend(args)

            def waitclose(self):
                pass

        gw.remote_exec = pseudoexec
    notifications = []
    for gw in manager.group:
        manager.rsync(gw, source,
                      notify=lambda *args: notifications.append(args))
    assert not notifications
    manager.teardown_nodes()
    assert not len(manager.group)
    assert "sys.path.insert" in gw.remote_exec.args[0]
def test_getrsyncdirs_with_conftest(self, testdir):
    """Roots combine ini-file rsyncdirs with --rsyncdir options."""
    basedir = py.path.local()
    for name in 'x y z'.split():
        basedir.mkdir(name)
    testdir.makeini("""
        [pytest]
        rsyncdirs= x
    """)
    config = testdir.parseconfigure(
        testdir.tmpdir, '--rsyncdir=y', '--rsyncdir=z')
    manager = NodeManager(config, specs=[execnet.XSpec("popen//chdir=xyz")])
    roots = manager._getrsyncdirs()
    # assert len(roots) == 3 + 1 # pylib
    assert py.path.local('y') in roots
    assert py.path.local('z') in roots
    assert testdir.tmpdir.join('x') in roots
def test_getrsyncdirs_with_conftest(self, testdir):
    """ini-configured and command-line rsync dirs all become roots."""
    cwd = py.path.local()
    for dirname in 'x y z'.split():
        cwd.mkdir(dirname)
    testdir.makeini("""
        [pytest]
        rsyncdirs= x
    """)
    config = testdir.parseconfigure(testdir.tmpdir,
                                    '--rsyncdir=y', '--rsyncdir=z')
    nm = NodeManager(config, specs=[execnet.XSpec("popen//chdir=xyz")])
    roots = nm._getrsyncdirs()
    # assert len(roots) == 3 + 1 # pylib
    for expected in (py.path.local('y'), py.path.local('z'),
                     testdir.tmpdir.join('x')):
        assert expected in roots
def test_init_rsync_roots(self, testdir, mysetup, slavecontroller):
    """Only the configured rsync root (dir1/dir2) is transferred.

    Fix: the final assertion previously checked ``dest.join("bogus")``,
    a path that is never created and therefore always absent, making the
    assertion vacuous.  It now checks ``bogusdir``, the directory that
    actually exists in the source tree and must not be synced.
    """
    source, dest = mysetup.source, mysetup.dest
    dir2 = source.ensure("dir1", "dir2", dir=1)
    source.ensure("dir1", "somefile", dir=1)
    dir2.ensure("hello")
    source.ensure("bogusdir", "file")
    source.join("tox.ini").write(py.std.textwrap.dedent("""
        [pytest]
        rsyncdirs=dir1/dir2
    """))
    config = testdir.parseconfig(source)
    nodemanager = NodeManager(config, ["popen//chdir=%s" % dest])
    nodemanager.setup_nodes(None)  # calls .rsync_roots()
    assert dest.join("dir2").check()
    assert not dest.join("dir1").check()
    # bogusdir lies outside the configured rsync root: must not be synced
    assert not dest.join("bogusdir").check()
def test_popen_rsync_subdir(self, testdir, mysetup, slavecontroller):
    """Rsyncing either a subdir or the topdir lands the tree under chdir."""
    source, dest = mysetup.source, mysetup.dest
    dir1 = mysetup.source.mkdir("dir1")
    dir2 = dir1.mkdir("dir2")
    dir2.ensure("hello")
    for rsyncroot in (dir1, source):
        dest.remove()
        config = testdir.parseconfig(
            "--tx", "popen//chdir=%s" % dest,
            "--rsyncdir", rsyncroot,
            source,
        )
        nodemanager = NodeManager(config)
        nodemanager.setup_nodes(None)  # calls .rsync_roots()
        if rsyncroot == source:
            # the whole topdir was synced, so look below its basename
            dest = dest.join("source")
        assert dest.join("dir1").check()
        assert dest.join("dir1", "dir2").check()
        assert dest.join("dir1", "dir2", 'hello').check()
        nodemanager.teardown_nodes()
def pytest_sessionstart(self, session):
    """Creates and starts the nodes.

    The nodes are setup to put their events onto self.queue.  As
    soon as nodes start they will emit the slave_slaveready event.
    """
    manager = NodeManager(self.config)
    self.nodemanager = manager
    self._active_nodes.update(manager.setup_nodes(putevent=self.queue.put))
def test_rsyncignore(self, testdir, mysetup):
    """rsyncignore entries must be excluded from the transfer.

    Fix: the last assertion checked ``dest.join("dir6")`` which can
    never exist (dir6 lives *under* dir5), so it was vacuous; it now
    checks ``dest.join("dir5", "dir6")``.  The unused ``dir5``/``dirf``
    locals were removed (their side-effecting ``ensure`` calls remain).
    """
    source, dest = mysetup.source, mysetup.dest
    dir2 = source.ensure("dir1", "dir2", dir=1)
    source.ensure("dir5", "dir6", "bogus")
    source.ensure("dir5", "file")
    dir2.ensure("hello")
    source.join("tox.ini").write(py.std.textwrap.dedent("""
        [pytest]
        rsyncdirs = dir1 dir5
        rsyncignore = dir1/dir2 dir5/dir6
    """))
    config = testdir.parseconfig(source)
    nodemanager = NodeManager(config, ["popen//chdir=%s" % dest])
    nodemanager.makegateways()
    nodemanager.rsync_roots()
    assert dest.join("dir1").check()
    assert not dest.join("dir1", "dir2").check()
    assert dest.join("dir5", "file").check()
    # ignored subdirectory must not appear under its synced parent
    assert not dest.join("dir5", "dir6").check()
def pytest_sessionstart(self, session):
    """Creates and starts the nodes.

    The nodes are setup to put their events onto self.queue.  As
    soon as nodes start they will emit the slave_slaveready event.
    """
    manager = NodeManager(self.config)
    self.nodemanager = manager
    nodes = manager.setup_nodes(putevent=self.queue.put)
    # tell every node which distribution mode is in effect
    for node in nodes:
        node.channel.send(("RUNNING_MODE", self.dist))
    self._active_nodes.update(nodes)
    self._session = session
def test_rsync_popen_with_path(self, config, mysetup, slavecontroller):
    """rsync pushes the source tree under chdir and notifies exactly once."""
    source, dest = mysetup.source, mysetup.dest
    manager = NodeManager(config, ["popen//chdir=%s" % dest] * 1)
    manager.setup_nodes(None)
    source.ensure("dir1", "dir2", "hello")
    events = []
    for gw in manager.group:
        manager.rsync(gw, source, notify=lambda *args: events.append(args))
    assert len(events) == 1
    assert events[0] == ("rsyncrootready", manager.group['gw0'].spec, source)
    manager.teardown_nodes()
    synced = dest.join(source.basename)
    assert synced.join("dir1").check()
    assert synced.join("dir1", "dir2").check()
    assert synced.join("dir1", "dir2", 'hello').check()
def test_rsyncignore(self, testdir, mysetup, slavecontroller):
    """rsyncignore from ini and from config.option are both honoured.

    Fix: the ``dest.join("dir6")`` assertion could never fail because
    dir6 only ever exists *under* dir5; it now checks
    ``dest.join("dir5", "dir6")``, the path rsync would actually create
    if the ignore pattern were broken.
    """
    source, dest = mysetup.source, mysetup.dest
    dir2 = source.ensure("dir1", "dir2", dir=1)
    source.ensure("dir5", "dir6", "bogus")
    source.ensure("dir5", "file")
    dir2.ensure("hello")
    source.ensure("foo", "bar")
    source.ensure("bar", "foo")
    source.join("tox.ini").write(py.std.textwrap.dedent("""
        [pytest]
        rsyncdirs = dir1 dir5
        rsyncignore = dir1/dir2 dir5/dir6 foo*
    """))
    config = testdir.parseconfig(source)
    # command-line ignores are merged with the ini-file ones
    config.option.rsyncignore = ['bar']
    nodemanager = NodeManager(config, ["popen//chdir=%s" % dest])
    nodemanager.setup_nodes(None)  # calls .rsync_roots()
    assert dest.join("dir1").check()
    assert not dest.join("dir1", "dir2").check()
    assert dest.join("dir5", "file").check()
    # ignored subdirectory must not appear under its synced parent
    assert not dest.join("dir5", "dir6").check()
    assert not dest.join('foo').check()
    assert not dest.join('bar').check()
def test_rsync_popen_with_path(self, config, mysetup):
    """A single rsync to one popen gateway notifies once and copies the tree."""
    source, dest = mysetup.source, mysetup.dest
    manager = NodeManager(config, ["popen//chdir=%s" % dest] * 1)
    manager.makegateways()
    source.ensure("dir1", "dir2", "hello")
    events = []
    manager.rsync(source, notify=lambda *args: events.append(args))
    assert len(events) == 1
    assert events[0] == ("rsyncrootready", manager.group['gw0'].spec, source)
    manager.teardown_nodes()
    synced = dest.join(source.basename)
    assert synced.join("dir1").check()
    assert synced.join("dir1", "dir2").check()
    assert synced.join("dir1", "dir2", 'hello').check()
def test_rsync_same_popen_twice(self, config, mysetup, hookrecorder,
                                slavecontroller):
    """Rsyncing one gateway of two identical targets transfers once."""
    source, dest = mysetup.source, mysetup.dest
    manager = NodeManager(config, ["popen//chdir=%s" % dest] * 2)
    manager.roots = []
    manager.setup_nodes(None)
    source.ensure("dir1", "dir2", "hello")
    gateway = manager.group[0]
    manager.rsync(gateway, source)
    call = hookrecorder.popcall("pytest_xdist_rsyncstart")
    assert call.source == source
    assert len(call.gateways) == 1
    assert call.gateways[0] in manager.group
    call = hookrecorder.popcall("pytest_xdist_rsyncfinish")
def test_popen_makegateway_events(self, config, hookrecorder,
                                  slavecontroller):
    """setup_nodes fires setupnodes once and newgateway per gateway."""
    manager = NodeManager(config, ["popen"] * 2)
    manager.setup_nodes(None)
    setup_call = hookrecorder.popcall("pytest_xdist_setupnodes")
    assert len(setup_call.specs) == 2
    first = hookrecorder.popcall("pytest_xdist_newgateway")
    assert first.gateway.spec == execnet.XSpec("popen")
    assert first.gateway.id == "gw0"
    second = hookrecorder.popcall("pytest_xdist_newgateway")
    assert second.gateway.id == "gw1"
    assert len(manager.group) == 2
    manager.teardown_nodes()
    assert not len(manager.group)
def test_popens_rsync(self, config, mysetup):
    """rsync to same-filesystem popen gateways produces no notifications."""
    source = mysetup.source
    manager = NodeManager(config, ["popen"] * 2)
    manager.makegateways()
    assert len(manager.group) == 2
    for gw in manager.group:
        # record remote_exec calls instead of actually executing remotely
        class pseudoexec:
            args = []

            def __init__(self, *args):
                self.args.extend(args)

            def waitclose(self):
                pass

        gw.remote_exec = pseudoexec
    notifications = []
    manager.rsync(source, notify=lambda *args: notifications.append(args))
    assert not notifications
    manager.teardown_nodes()
    assert not len(manager.group)
    assert "sys.path.insert" in gw.remote_exec.args[0]
class DSession:
    """A py.test plugin which runs a distributed test session

    At the beginning of the test session this creates a NodeManager
    instance which creates and starts all nodes.  Nodes then emit
    events processed in the pytest_runtestloop hook using the slave_*
    methods.

    Once a node is started it will automatically start running the
    py.test mainloop with some custom hooks.  This means a node
    automatically starts collecting tests.  Once tests are collected
    it will wait for instructions.
    """

    def __init__(self, config):
        self.config = config
        self.log = py.log.Producer("dsession")
        if not config.option.debug:
            py.log.setconsumer(self.log._keywords, None)
        self.shuttingdown = False
        self.countfailures = 0
        self.maxfail = config.getvalue("maxfail")
        self.queue = queue.Queue()
        self._session = None
        self._failed_collection_errors = {}
        self._active_nodes = set()
        self._failed_nodes_count = 0
        self._max_slave_restart = self.config.getoption('max_slave_restart')
        if self._max_slave_restart is not None:
            self._max_slave_restart = int(self._max_slave_restart)
        try:
            self.terminal = config.pluginmanager.getplugin("terminalreporter")
        except KeyError:
            self.terminal = None
        else:
            self.trdist = TerminalDistReporter(config)
            config.pluginmanager.register(self.trdist, "terminaldistreporter")

    @property
    def session_finished(self):
        """Return True if the distributed session has finished

        This means all nodes have executed all test items.  This is
        used to by pytest_runtestloop to break out of it's loop.
        """
        return bool(self.shuttingdown and not self._active_nodes)

    def report_line(self, line):
        # only write when a terminal reporter exists and verbosity >= 0
        if self.terminal and self.config.option.verbose >= 0:
            self.terminal.write_line(line)

    @pytest.mark.trylast
    def pytest_sessionstart(self, session):
        """Creates and starts the nodes.

        The nodes are setup to put their events onto self.queue.  As
        soon as nodes start they will emit the slave_slaveready event.
        """
        self.nodemanager = NodeManager(self.config)
        nodes = self.nodemanager.setup_nodes(putevent=self.queue.put)
        self._active_nodes.update(nodes)
        self._session = session

    def pytest_sessionfinish(self, session):
        """Shutdown all nodes."""
        nm = getattr(self, 'nodemanager', None)  # if not fully initialized
        if nm is not None:
            nm.teardown_nodes()
        self._session = None

    def pytest_collection(self):
        # prohibit collection of test items in master process
        return True

    def pytest_runtestloop(self):
        # Select the scheduler implementation requested via --dist and
        # drive the event loop until all nodes are finished.
        numnodes = len(self.nodemanager.specs)
        dist = self.config.getvalue("dist")
        if dist == "load":
            self.sched = LoadScheduling(numnodes, log=self.log,
                                        config=self.config)
        elif dist == "each":
            self.sched = EachScheduling(numnodes, log=self.log)
        else:
            assert 0, dist
        self.shouldstop = False
        while not self.session_finished:
            self.loop_once()
            if self.shouldstop:
                raise Interrupted(str(self.shouldstop))
        return True

    def loop_once(self):
        """Process one callback from one of the slaves."""
        while 1:
            try:
                eventcall = self.queue.get(timeout=2.0)
                break
            except queue.Empty:
                continue
        callname, kwargs = eventcall
        assert callname, kwargs
        # dispatch to the matching slave_* handler method
        method = "slave_" + callname
        call = getattr(self, method)
        self.log("calling method", method, kwargs)
        call(**kwargs)
        if self.sched.tests_finished():
            self.triggershutdown()

    #
    # callbacks for processing events from slaves
    #

    def slave_slaveready(self, node, slaveinfo):
        """Emitted when a node first starts up.

        This adds the node to the scheduler, nodes continue with
        collection without any further input.
        """
        node.slaveinfo = slaveinfo
        node.slaveinfo['id'] = node.gateway.id
        node.slaveinfo['spec'] = node.gateway.spec
        self.config.hook.pytest_testnodeready(node=node)
        if self.shuttingdown:
            node.shutdown()
        else:
            self.sched.addnode(node)

    def slave_slavefinished(self, node):
        """Emitted when node executes its pytest_sessionfinish hook.

        Removes the node from the scheduler.

        The node might not be the scheduler if it had not emitted
        slaveready before shutdown was triggered.
        """
        self.config.hook.pytest_testnodedown(node=node, error=None)
        if node.slaveoutput['exitstatus'] == 2:  # keyboard-interrupt
            self.shouldstop = "%s received keyboard-interrupt" % (node, )
            self.slave_errordown(node, "keyboard-interrupt")
            return
        if node in self.sched.nodes:
            crashitem = self.sched.remove_node(node)
            assert not crashitem, (crashitem, node)
        self._active_nodes.remove(node)

    def slave_errordown(self, node, error):
        """Emitted by the SlaveController when a node dies."""
        self.config.hook.pytest_testnodedown(node=node, error=error)
        try:
            crashitem = self.sched.remove_node(node)
        except KeyError:
            pass
        else:
            if crashitem:
                self.handle_crashitem(crashitem, node)
        self._failed_nodes_count += 1
        maximum_reached = (self._max_slave_restart is not None and
                           self._failed_nodes_count > self._max_slave_restart)
        if maximum_reached:
            if self._max_slave_restart == 0:
                msg = 'Slave restarting disabled'
            else:
                msg = "Maximum crashed slaves reached: %d" % \
                      self._max_slave_restart
            self.report_line(msg)
        else:
            # restart budget not exhausted: replace the crashed slave
            self.report_line("Replacing crashed slave %s" % node.gateway.id)
            self._clone_node(node)
        self._active_nodes.remove(node)

    def slave_collectionfinish(self, node, ids):
        """Slave has finished test collection.

        This adds the collection for this node to the scheduler.  If
        the scheduler indicates collection is finished (i.e. all
        initial nodes have submitted their collection), then tells the
        scheduler to schedule the collected items.  When initiating
        scheduling the first time it logs which scheduler is in use.
        """
        if self.shuttingdown:
            return
        # tell session which items were effectively collected otherwise
        # the master node will finish the session with EXIT_NOTESTSCOLLECTED
        self._session.testscollected = len(ids)
        self.sched.addnode_collection(node, ids)
        if self.terminal:
            self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
        if self.sched.collection_is_completed:
            if self.terminal and not self.sched.haspending():
                self.trdist.ensure_show_status()
                self.terminal.write_line("")
                self.terminal.write_line("scheduling tests via %s" % (
                    self.sched.__class__.__name__))
            self.sched.init_distribute()

    def slave_logstart(self, node, nodeid, location):
        """Emitted when a node calls the pytest_runtest_logstart hook."""
        self.config.hook.pytest_runtest_logstart(
            nodeid=nodeid, location=location)

    def slave_testreport(self, node, rep):
        """Emitted when a node calls the pytest_runtest_logreport hook.

        If the node indicates it is finished with a test item remove
        the item from the pending list in the scheduler.
        """
        if rep.when == "call" or (rep.when == "setup" and not rep.passed):
            self.sched.remove_item(node, rep.item_index, rep.duration)
        # self.report_line("testreport %s: %s" %(rep.id, rep.status))
        rep.node = node
        self.config.hook.pytest_runtest_logreport(report=rep)
        self._handlefailures(rep)

    def slave_collectreport(self, node, rep):
        """Emitted when a node calls the pytest_collectreport hook."""
        if rep.failed:
            self._failed_slave_collectreport(node, rep)

    def _clone_node(self, node):
        """Return new node based on an existing one.

        This is normally for when a node died, this will copy the spec
        of the existing node and create a new one with a new id.  The
        new node will have been setup so will start calling the
        "slave_*" hooks and do work soon.
        """
        spec = node.gateway.spec
        spec.id = None
        self.nodemanager.group.allocate_id(spec)
        node = self.nodemanager.setup_node(spec, self.queue.put)
        self._active_nodes.add(node)
        return node

    def _failed_slave_collectreport(self, node, rep):
        # Check we haven't already seen this report (from
        # another slave).
        if rep.longrepr not in self._failed_collection_errors:
            self._failed_collection_errors[rep.longrepr] = True
            self.config.hook.pytest_collectreport(report=rep)
            self._handlefailures(rep)

    def _handlefailures(self, rep):
        # honour --maxfail by recording failures and requesting a stop
        if rep.failed:
            self.countfailures += 1
            if self.maxfail and self.countfailures >= self.maxfail:
                self.shouldstop = "stopping after %d failures" % (
                    self.countfailures)

    def triggershutdown(self):
        # ask every scheduled node to shut down once all tests are done
        self.log("triggering shutdown")
        self.shuttingdown = True
        for node in self.sched.nodes:
            node.shutdown()

    def handle_crashitem(self, nodeid, slave):
        # XXX get more reporting info by recording pytest_runtest_logstart?
        # XXX count no of failures and retry N times
        runner = self.config.pluginmanager.getplugin("runner")
        fspath = nodeid.split("::")[0]
        msg = "Slave %r crashed while running %r" % (slave.gateway.id, nodeid)
        rep = runner.TestReport(nodeid, (fspath, None, fspath), (),
                                "failed", msg, "???")
        rep.node = slave
        self.config.hook.pytest_runtest_logreport(report=rep)
def test_xspecs_multiplied(self, testdir):
    """A ``3*popen`` option expands into three popen xspecs."""
    config = testdir.parseconfigure("--tx=3*popen", )
    specs = NodeManager(config)._getxspecs()
    assert len(specs) == 3
    assert specs[1].popen
def test_getrsyncignore(self, testdir):
    """--rsyncignore patterns end up in the manager's rsync options."""
    config = testdir.parseconfigure('--rsyncignore=fo*')
    manager = NodeManager(config, specs=[execnet.XSpec("popen//chdir=qwe")])
    assert 'fo*' in manager.rsyncoptions['ignores']
class DSession:
    """Master-side plugin driving a distributed test session.

    Creates a NodeManager at session start and then processes the
    slave_* events the nodes put onto self.queue.
    """

    def __init__(self, config):
        self.config = config
        self.log = py.log.Producer("dsession")
        if not config.option.debug:
            py.log.setconsumer(self.log._keywords, None)
        self.shuttingdown = False
        self.countfailures = 0
        self.maxfail = config.getvalue("maxfail")
        self.queue = queue.Queue()
        try:
            self.terminal = config.pluginmanager.getplugin("terminalreporter")
        except KeyError:
            self.terminal = None
        else:
            self.trdist = TerminalDistReporter(config)
            config.pluginmanager.register(self.trdist, "terminaldistreporter")

    def report_line(self, line):
        # only write when a terminal reporter exists and verbosity >= 0
        if self.terminal and self.config.option.verbose >= 0:
            self.terminal.write_line(line)

    @pytest.mark.trylast
    def pytest_sessionstart(self, session):
        # create and start all nodes; their events land on self.queue
        self.nodemanager = NodeManager(self.config)
        self.nodemanager.setup_nodes(putevent=self.queue.put)

    def pytest_sessionfinish(self, session):
        """ teardown any resources after a test run. """
        nm = getattr(self, "nodemanager", None)  # if not fully initialized
        if nm is not None:
            nm.teardown_nodes()

    def pytest_collection(self):
        # prohibit collection of test items in master process
        return True

    def pytest_runtestloop(self):
        # select the scheduler requested via --dist and run the loop
        numnodes = len(self.nodemanager.specs)
        dist = self.config.getvalue("dist")
        if dist == "load":
            self.sched = LoadScheduling(numnodes, log=self.log)
        elif dist == "each":
            self.sched = EachScheduling(numnodes, log=self.log)
        else:
            assert 0, dist
        self.shouldstop = False
        self.session_finished = False
        while not self.session_finished:
            self.loop_once()
            if self.shouldstop:
                raise Interrupted(str(self.shouldstop))
        return True

    def loop_once(self):
        """ process one callback from one of the slaves. """
        while 1:
            try:
                eventcall = self.queue.get(timeout=2.0)
                break
            except queue.Empty:
                continue
        callname, kwargs = eventcall
        assert callname, kwargs
        # dispatch to the matching slave_* handler method
        method = "slave_" + callname
        call = getattr(self, method)
        self.log("calling method: %s(**%s)" % (method, kwargs))
        call(**kwargs)
        if self.sched.tests_finished():
            self.triggershutdown()

    #
    # callbacks for processing events from slaves
    #
    def slave_slaveready(self, node, slaveinfo):
        # a node came up: register it with the scheduler
        node.slaveinfo = slaveinfo
        node.slaveinfo["id"] = node.gateway.id
        node.slaveinfo["spec"] = node.gateway.spec
        self.config.hook.pytest_testnodeready(node=node)
        self.sched.addnode(node)
        if self.shuttingdown:
            node.shutdown()

    def slave_slavefinished(self, node):
        # a node finished its session; remove it from the scheduler
        self.config.hook.pytest_testnodedown(node=node, error=None)
        if node.slaveoutput["exitstatus"] == 2:  # keyboard-interrupt
            self.shouldstop = "%s received keyboard-interrupt" % (node,)
            self.slave_errordown(node, "keyboard-interrupt")
            return
        crashitem = self.sched.remove_node(node)
        # assert not crashitem, (crashitem, node)
        if self.shuttingdown and not self.sched.hasnodes():
            self.session_finished = True

    def slave_errordown(self, node, error):
        # a node died unexpectedly; report any item it crashed on
        self.config.hook.pytest_testnodedown(node=node, error=error)
        try:
            crashitem = self.sched.remove_node(node)
        except KeyError:
            pass
        else:
            if crashitem:
                self.handle_crashitem(crashitem, node)
                # self.report_line("item crashed on node: %s" % crashitem)
        if not self.sched.hasnodes():
            self.session_finished = True

    def slave_collectionfinish(self, node, ids):
        # record the node's collection; start scheduling once complete
        self.sched.addnode_collection(node, ids)
        if self.terminal:
            self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
        if self.sched.collection_is_completed:
            if self.terminal:
                self.trdist.ensure_show_status()
                self.terminal.write_line("")
                self.terminal.write_line("scheduling tests via %s" % (
                    self.sched.__class__.__name__))
            self.sched.init_distribute()

    def slave_logstart(self, node, nodeid, location):
        self.config.hook.pytest_runtest_logstart(
            nodeid=nodeid, location=location)

    def slave_testreport(self, node, rep):
        # a finished setup/call phase removes the item from pending
        if not (rep.passed and rep.when != "call"):
            if rep.when in ("setup", "call"):
                self.sched.remove_item(node, rep.nodeid)
        # self.report_line("testreport %s: %s" %(rep.id, rep.status))
        rep.node = node
        self.config.hook.pytest_runtest_logreport(report=rep)
        self._handlefailures(rep)

    def slave_collectreport(self, node, rep):
        # self.report_line("collectreport %s: %s" %(rep.id, rep.status))
        # rep.node = node
        self._handlefailures(rep)

    def _handlefailures(self, rep):
        # honour --maxfail by recording failures and requesting a stop
        if rep.failed:
            self.countfailures += 1
            if self.maxfail and self.countfailures >= self.maxfail:
                self.shouldstop = "stopping after %d failures" % (
                    self.countfailures)

    def triggershutdown(self):
        # ask every node with pending work to shut down
        self.log("triggering shutdown")
        self.shuttingdown = True
        for node in self.sched.node2pending:
            node.shutdown()

    def handle_crashitem(self, nodeid, slave):
        # XXX get more reporting info by recording pytest_runtest_logstart?
        runner = self.config.pluginmanager.getplugin("runner")
        fspath = nodeid.split("::")[0]
        msg = "Slave %r crashed while running %r" % (slave.gateway.id, nodeid)
        rep = runner.TestReport(nodeid, (fspath, None, fspath), (),
                                "failed", msg, "???")
        rep.node = slave
        self.config.hook.pytest_runtest_logreport(report=rep)
class DSession:
    """Master-side plugin driving a distributed test session.

    Creates a NodeManager at session start and then processes the
    slave_* events the nodes put onto self.queue.
    """

    def __init__(self, config):
        self.config = config
        self.log = py.log.Producer("dsession")
        if not config.option.debug:
            py.log.setconsumer(self.log._keywords, None)
        self.shuttingdown = False
        self.countfailures = 0
        self.maxfail = config.getvalue("maxfail")
        self.queue = queue.Queue()
        try:
            self.terminal = config.pluginmanager.getplugin("terminalreporter")
        except KeyError:
            self.terminal = None
        else:
            self.trdist = TerminalDistReporter(config)
            config.pluginmanager.register(self.trdist, "terminaldistreporter")

    def report_line(self, line):
        # only write when a terminal reporter exists and verbosity >= 0
        if self.terminal and self.config.option.verbose >= 0:
            self.terminal.write_line(line)

    @pytest.mark.trylast
    def pytest_sessionstart(self, session):
        # create and start all nodes; their events land on self.queue
        self.nodemanager = NodeManager(self.config)
        self.nodemanager.setup_nodes(putevent=self.queue.put)

    def pytest_sessionfinish(self, session):
        """ teardown any resources after a test run. """
        nm = getattr(self, 'nodemanager', None)  # if not fully initialized
        if nm is not None:
            nm.teardown_nodes()

    def pytest_collection(self):
        # prohibit collection of test items in master process
        return True

    def pytest_runtestloop(self):
        # select the scheduler requested via --dist and run the loop
        numnodes = len(self.nodemanager.specs)
        dist = self.config.getvalue("dist")
        if dist == "load":
            self.sched = LoadScheduling(numnodes, log=self.log)
        elif dist == "each":
            self.sched = EachScheduling(numnodes, log=self.log)
        else:
            assert 0, dist
        self.shouldstop = False
        self.session_finished = False
        while not self.session_finished:
            self.loop_once()
            if self.shouldstop:
                raise Interrupted(str(self.shouldstop))
        return True

    def loop_once(self):
        """ process one callback from one of the slaves. """
        while 1:
            try:
                eventcall = self.queue.get(timeout=2.0)
                break
            except queue.Empty:
                continue
        callname, kwargs = eventcall
        assert callname, kwargs
        # dispatch to the matching slave_* handler method
        method = "slave_" + callname
        call = getattr(self, method)
        self.log("calling method: %s(**%s)" % (method, kwargs))
        call(**kwargs)
        if self.sched.tests_finished():
            self.triggershutdown()

    #
    # callbacks for processing events from slaves
    #
    def slave_slaveready(self, node, slaveinfo):
        # a node came up: register it with the scheduler
        node.slaveinfo = slaveinfo
        node.slaveinfo['id'] = node.gateway.id
        node.slaveinfo['spec'] = node.gateway.spec
        self.config.hook.pytest_testnodeready(node=node)
        self.sched.addnode(node)
        if self.shuttingdown:
            node.shutdown()

    def slave_slavefinished(self, node):
        # a node finished its session; remove it from the scheduler
        self.config.hook.pytest_testnodedown(node=node, error=None)
        if node.slaveoutput['exitstatus'] == 2:  # keyboard-interrupt
            self.shouldstop = "%s received keyboard-interrupt" % (node, )
            self.slave_errordown(node, "keyboard-interrupt")
            return
        crashitem = self.sched.remove_node(node)
        #assert not crashitem, (crashitem, node)
        if self.shuttingdown and not self.sched.hasnodes():
            self.session_finished = True

    def slave_errordown(self, node, error):
        # a node died unexpectedly; report any item it crashed on
        self.config.hook.pytest_testnodedown(node=node, error=error)
        try:
            crashitem = self.sched.remove_node(node)
        except KeyError:
            pass
        else:
            if crashitem:
                self.handle_crashitem(crashitem, node)
                #self.report_line("item crashed on node: %s" % crashitem)
        if not self.sched.hasnodes():
            self.session_finished = True

    def slave_collectionfinish(self, node, ids):
        # record the node's collection; start scheduling once complete
        self.sched.addnode_collection(node, ids)
        if self.terminal:
            self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
        if self.sched.collection_is_completed:
            if self.terminal:
                self.trdist.ensure_show_status()
                self.terminal.write_line("")
                self.terminal.write_line("scheduling tests via %s" % (
                    self.sched.__class__.__name__))
            self.sched.init_distribute()

    def slave_logstart(self, node, nodeid, location):
        self.config.hook.pytest_runtest_logstart(
            nodeid=nodeid, location=location)

    def slave_testreport(self, node, rep):
        # a finished setup/call phase removes the item from pending
        if not (rep.passed and rep.when != "call"):
            if rep.when in ("setup", "call"):
                self.sched.remove_item(node, rep.nodeid)
        #self.report_line("testreport %s: %s" %(rep.id, rep.status))
        rep.node = node
        self.config.hook.pytest_runtest_logreport(report=rep)
        self._handlefailures(rep)

    def slave_collectreport(self, node, rep):
        #self.report_line("collectreport %s: %s" %(rep.id, rep.status))
        #rep.node = node
        self._handlefailures(rep)

    def _handlefailures(self, rep):
        # honour --maxfail by recording failures and requesting a stop
        if rep.failed:
            self.countfailures += 1
            if self.maxfail and self.countfailures >= self.maxfail:
                self.shouldstop = "stopping after %d failures" % (
                    self.countfailures)

    def triggershutdown(self):
        # ask every node with pending work to shut down
        self.log("triggering shutdown")
        self.shuttingdown = True
        for node in self.sched.node2pending:
            node.shutdown()

    def handle_crashitem(self, nodeid, slave):
        # XXX get more reporting info by recording pytest_runtest_logstart?
        runner = self.config.pluginmanager.getplugin("runner")
        fspath = nodeid.split("::")[0]
        msg = "Slave %r crashed while running %r" % (slave.gateway.id, nodeid)
        rep = runner.TestReport(nodeid, (fspath, None, fspath), (),
                                "failed", msg, "???")
        rep.node = slave
        self.config.hook.pytest_runtest_logreport(report=rep)
def test_default_chdir(self, config):
    """Remote specs default to chdir 'pyexecnetcache' unless overridden."""
    specs = ["ssh=noco", "socket=xyz"]
    for spec in NodeManager(config, specs).specs:
        assert spec.chdir == "pyexecnetcache"
    for spec in NodeManager(config, specs, defaultchdir="abc").specs:
        assert spec.chdir == "abc"
def get_nodes_specs(
        nodes, python=None, chdir=None, virtualenv_path=None,
        mem_per_process=None, max_processes=None, rsync_max_processes=None,
        rsync_bandwidth_limit=None, config=None):
    """Get nodes specs.

    Get list of node names, connect to each of them, get the system
    information, produce the list of node specs out of that
    information filtering non-connectable nodes and nodes which don't
    comply the requirements.  Executed on the master node side.

    :param nodes: `list` of node names in form
        [[<username>@]<hostname>, ...]
    :type nodes: list
    :param python: python executable name to use on the remote side
    :type python: str
    :param chdir: relative path where to run (and sync) tests on the
        remote side
    :type chdir: str
    :param virtualenv_path: relative path to the virtualenv to
        activate on the remote test node
    :type virtualenv_path: str
    :param mem_per_process: optional amount of memory per process
        needed, in megabytest
    :type mem_per_process: int
    :param max_processes: optional maximum number of processes per
        test node
    :type max_processes: int
    :param rsync_max_processes: optional maximum number of rsync
        processes
    :type rsync_max_processes: int
    :param rsync_bandwidth_limit: optional bandwidth limit per rsync
        process in kilobytes per second
    :type rsync_bandwidth_limit: int
    :param config: pytest config object
    :type config: pytest.Config

    :return: `list` of test gateway specs for all test nodes which
        confirm given requirements in form
        ['1*ssh=<node>//id=<hostname>:<index>', ...]
    :rtype: list
    """
    group = execnet.Group()
    try:
        if virtualenv_path:
            # NOTE(review): ``nm`` is only bound inside this branch, but
            # ``**nm.rsyncoptions`` below is evaluated unconditionally —
            # looks like a NameError when virtualenv_path is falsy;
            # confirm against callers before relying on that path.
            nm = NodeManager(config, specs=[])
            virtualenv_path = os.path.relpath(virtualenv_path)
        node_specs = []
        node_caps = {}
        root_dir = getrootdir(config, '')
        # drop duplicate node names while preserving order
        nodes = list(unique_everseen(nodes))
        print('Detected root dir: {0}'.format(root_dir))
        rsync = RSync(
            root_dir,
            chdir,
            includes=config.getini("rsyncdirs"),
            jobs=rsync_max_processes or len(nodes),
            bwlimit=rsync_bandwidth_limit,
            bandwidth_limit=rsync_bandwidth_limit,
            **nm.rsyncoptions)
        print('Detecting connectable test nodes...')
        for node in nodes:
            # host part of an optional user@host node name
            host = node.split('@')[1] if '@' in node else node
            spec = 'ssh={node}//id={host}//chdir={chdir}//python={python}'.format(
                node=node, host=host, chdir=chdir, python=python)
            try:
                make_gateway(group, spec)
            except Exception:  # pylint: disable=W0703
                # unreachable nodes are silently filtered out
                continue
            rsync.add_target_host(node)
            node_specs.append((node, host))
        if node_specs:
            print('Found {0} connectable test nodes: {1}'.format(
                len(node_specs), rsync.targets))
        else:
            pytest.exit('None of the given test nodes are connectable')
        print('RSyncing directory structure')
        rsync.send()
        print('RSync finished')
        develop_eggs = get_develop_eggs(root_dir, config)
        group.remote_exec(
            patches.activate_env, virtualenv_path=virtualenv_path,
            develop_eggs=develop_eggs).waitclose()
        # collect per-node system capabilities from every gateway
        multi_channel = group.remote_exec(get_node_capabilities)
        try:
            caps = multi_channel.receive_each(True)
            for ch, cap in caps:
                node_caps[ch.gateway.id] = cap
        finally:
            multi_channel.waitclose()
        return list(
            chain.from_iterable(
                get_node_specs(
                    node, hst, node_caps[hst],
                    python=os.path.join(chdir, virtualenv_path, 'bin', python),
                    chdir=chdir,
                    mem_per_process=mem_per_process,
                    max_processes=max_processes)
                for node, hst in node_specs))
    finally:
        try:
            group.terminate()
        except Exception:  # pylint: disable=W0703
            pass
def pytest_sessionstart(self, session): self.nodemanager = NodeManager(self.config) self.nodemanager.setup_nodes(putevent=self.queue.put)
class DSession:
    """A py.test plugin which runs a distributed test session

    At the beginning of the test session this creates a NodeManager
    instance which creates and starts all nodes.  Nodes then emit
    events processed in the pytest_runtestloop hook using the slave_*
    methods.

    Once a node is started it will automatically start running the
    py.test mainloop with some custom hooks.  This means a node
    automatically starts collecting tests.  Once tests are collected
    it will wait for instructions.
    """

    def __init__(self, config):
        self.config = config
        self.log = py.log.Producer("dsession")
        # Silence the log producer unless --debug was given.
        if not config.option.debug:
            py.log.setconsumer(self.log._keywords, None)
        self.shuttingdown = False
        self.countfailures = 0
        self.maxfail = config.getvalue("maxfail")
        # Queue onto which slave controllers put their events.
        self.queue = queue.Queue()
        self._session = None
        # longrepr -> True, to deduplicate collect errors across slaves.
        self._failed_collection_errors = {}
        self._active_nodes = set()
        self._failed_nodes_count = 0
        self._max_slave_restart = self.config.getoption('max_slave_restart')
        if self._max_slave_restart is not None:
            self._max_slave_restart = int(self._max_slave_restart)
        try:
            self.terminal = config.pluginmanager.getplugin("terminalreporter")
        except KeyError:
            self.terminal = None
        else:
            self.trdist = TerminalDistReporter(config)
            config.pluginmanager.register(self.trdist, "terminaldistreporter")

    @property
    def session_finished(self):
        """Return True if the distributed session has finished

        This means all nodes have executed all test items.  This is
        used by pytest_runtestloop to break out of its loop.
        """
        return bool(self.shuttingdown and not self._active_nodes)

    def report_line(self, line):
        # Only write when a terminal reporter exists and -q was not given.
        if self.terminal and self.config.option.verbose >= 0:
            self.terminal.write_line(line)

    @pytest.mark.trylast
    def pytest_sessionstart(self, session):
        """Creates and starts the nodes.

        The nodes are setup to put their events onto self.queue.  As
        soon as nodes start they will emit the slave_slaveready event.
        """
        self.nodemanager = NodeManager(self.config)
        nodes = self.nodemanager.setup_nodes(putevent=self.queue.put)
        self._active_nodes.update(nodes)
        self._session = session

    def pytest_sessionfinish(self, session):
        """Shutdown all nodes."""
        nm = getattr(self, 'nodemanager', None)  # if not fully initialized
        if nm is not None:
            nm.teardown_nodes()
        self._session = None

    def pytest_collection(self):
        # prohibit collection of test items in master process
        return True

    def pytest_runtestloop(self):
        numnodes = len(self.nodemanager.specs)
        dist = self.config.getvalue("dist")
        # Pick the scheduler matching the requested distribution mode.
        if dist == "load":
            self.sched = LoadScheduling(numnodes, log=self.log,
                                        config=self.config)
        elif dist == "each":
            self.sched = EachScheduling(numnodes, log=self.log)
        else:
            assert 0, dist
        self.shouldstop = False
        while not self.session_finished:
            self.loop_once()
            if self.shouldstop:
                raise Interrupted(str(self.shouldstop))
        return True

    def loop_once(self):
        """Process one callback from one of the slaves."""
        while 1:
            try:
                eventcall = self.queue.get(timeout=2.0)
                break
            except queue.Empty:
                continue
        callname, kwargs = eventcall
        assert callname, kwargs
        # Dispatch the event to the matching slave_* handler below.
        method = "slave_" + callname
        call = getattr(self, method)
        self.log("calling method", method, kwargs)
        call(**kwargs)
        if self.sched.tests_finished():
            self.triggershutdown()

    #
    # callbacks for processing events from slaves
    #

    def slave_slaveready(self, node, slaveinfo):
        """Emitted when a node first starts up.

        This adds the node to the scheduler, nodes continue with
        collection without any further input.
        """
        node.slaveinfo = slaveinfo
        node.slaveinfo['id'] = node.gateway.id
        node.slaveinfo['spec'] = node.gateway.spec
        self.config.hook.pytest_testnodeready(node=node)
        if self.shuttingdown:
            node.shutdown()
        else:
            self.sched.addnode(node)

    def slave_slavefinished(self, node):
        """Emitted when node executes its pytest_sessionfinish hook.

        Removes the node from the scheduler.

        The node might not be in the scheduler if it had not emitted
        slaveready before shutdown was triggered.
        """
        self.config.hook.pytest_testnodedown(node=node, error=None)
        if node.slaveoutput['exitstatus'] == 2:  # keyboard-interrupt
            self.shouldstop = "%s received keyboard-interrupt" % (node,)
            self.slave_errordown(node, "keyboard-interrupt")
            return
        if node in self.sched.nodes:
            crashitem = self.sched.remove_node(node)
            assert not crashitem, (crashitem, node)
        self._active_nodes.remove(node)

    def slave_errordown(self, node, error):
        """Emitted by the SlaveController when a node dies."""
        self.config.hook.pytest_testnodedown(node=node, error=error)
        try:
            crashitem = self.sched.remove_node(node)
        except KeyError:
            pass
        else:
            if crashitem:
                self.handle_crashitem(crashitem, node)
        self._failed_nodes_count += 1
        maximum_reached = (self._max_slave_restart is not None and
                           self._failed_nodes_count > self._max_slave_restart)
        if maximum_reached:
            if self._max_slave_restart == 0:
                msg = 'Slave restarting disabled'
            else:
                msg = "Maximum crashed slaves reached: %d" % \
                      self._max_slave_restart
            self.report_line(msg)
        else:
            # Restart budget not exhausted: replace the dead node.
            self.report_line("Replacing crashed slave %s" % node.gateway.id)
            self._clone_node(node)
        self._active_nodes.remove(node)

    def slave_collectionfinish(self, node, ids):
        """Slave has finished test collection.

        This adds the collection for this node to the scheduler.  If
        the scheduler indicates collection is finished (i.e. all
        initial nodes have submitted their collection), then tells the
        scheduler to schedule the collected items.  When initiating
        scheduling the first time it logs which scheduler is in use.
        """
        if self.shuttingdown:
            return
        # tell session which items were effectively collected otherwise
        # the master node will finish the session with EXIT_NOTESTSCOLLECTED
        self._session.testscollected = len(ids)
        self.sched.addnode_collection(node, ids)
        if self.terminal:
            self.trdist.setstatus(node.gateway.spec, "[%d]" % (len(ids)))
        if self.sched.collection_is_completed:
            if self.terminal and not self.sched.haspending():
                self.trdist.ensure_show_status()
                self.terminal.write_line("")
                self.terminal.write_line("scheduling tests via %s" % (
                    self.sched.__class__.__name__))
            self.sched.init_distribute()

    def slave_logstart(self, node, nodeid, location):
        """Emitted when a node calls the pytest_runtest_logstart hook."""
        self.config.hook.pytest_runtest_logstart(
            nodeid=nodeid, location=location)

    def slave_testreport(self, node, rep):
        """Emitted when a node calls the pytest_runtest_logreport hook.

        If the node indicates it is finished with a test item remove
        the item from the pending list in the scheduler.
        """
        if rep.when == "call" or (rep.when == "setup" and not rep.passed):
            self.sched.remove_item(node, rep.item_index, rep.duration)
        # self.report_line("testreport %s: %s" %(rep.id, rep.status))
        rep.node = node
        self.config.hook.pytest_runtest_logreport(report=rep)
        self._handlefailures(rep)

    def slave_collectreport(self, node, rep):
        """Emitted when a node calls the pytest_collectreport hook."""
        if rep.failed:
            self._failed_slave_collectreport(node, rep)

    def _clone_node(self, node):
        """Return new node based on an existing one.

        This is normally for when a node died, this will copy the spec
        of the existing node and create a new one with a new id.  The
        new node will have been setup so will start calling the
        "slave_*" hooks and do work soon.
        """
        spec = node.gateway.spec
        spec.id = None
        self.nodemanager.group.allocate_id(spec)
        node = self.nodemanager.setup_node(spec, self.queue.put)
        self._active_nodes.add(node)
        return node

    def _failed_slave_collectreport(self, node, rep):
        # Check we haven't already seen this report (from
        # another slave).
        if rep.longrepr not in self._failed_collection_errors:
            self._failed_collection_errors[rep.longrepr] = True
            self.config.hook.pytest_collectreport(report=rep)
            self._handlefailures(rep)

    def _handlefailures(self, rep):
        if rep.failed:
            self.countfailures += 1
            # Honour --maxfail by signalling the run loop to stop.
            if self.maxfail and self.countfailures >= self.maxfail:
                self.shouldstop = "stopping after %d failures" % (
                    self.countfailures)

    def triggershutdown(self):
        self.log("triggering shutdown")
        self.shuttingdown = True
        for node in self.sched.nodes:
            node.shutdown()

    def handle_crashitem(self, nodeid, slave):
        # XXX get more reporting info by recording pytest_runtest_logstart?
        # XXX count no of failures and retry N times
        runner = self.config.pluginmanager.getplugin("runner")
        fspath = nodeid.split("::")[0]
        msg = "Slave %r crashed while running %r" % (slave.gateway.id, nodeid)
        rep = runner.TestReport(nodeid, (fspath, None, fspath),
                                (), "failed", msg, "???")
        rep.node = slave
        self.config.hook.pytest_runtest_logreport(report=rep)
def test_popen_no_default_chdir(self, config): gm = NodeManager(config, ["popen"]) assert gm.specs[0].chdir is None