def _start_dm(self, **kwargs):
    host, events_port, rpc_port = nm_conninfo(len(self._dms))
    nm = NodeManager(useDLM=False, host=host,
                     events_port=events_port, rpc_port=rpc_port, **kwargs)
    self._dms.append(nm)
    return nm
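# A minimal usage sketch (not part of the original suite): each call to
# _start_dm obtains a fresh host/ports combination through nm_conninfo and
# records the NodeManager in self._dms, so a test can spin up several
# inter-communicating managers and let a shared tearDown shut them all down.
# Any extra keyword arguments (e.g. error_listener) are forwarded to
# NodeManager unchanged. Method name and session handling here are hypothetical.
def _sketch_two_node_managers(self):
    dm1 = self._start_dm()
    dm2 = self._start_dm()
    self.assertIsNot(dm1, dm2)
    return dm1, dm2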
def test_many_relationships(self):
    """
    A test in which a drop is related to many other drops that live in a
    separate DM (and thus requires many Pyro connections).

    Drop A is accessed by many applications (B1, B2, .., BN), which should
    not exhaust resources on DM #1 (in particular, the pyro thread pool).
    We collapse all into C so we can monitor only its status to know that
    the execution is over.

     DM #1              DM #2
    =======    ====================
    |     |    | |--> B1 --|      |
    |     |    | |--> B2 --|      |
    | A --|----|-|--> B3 --|--> C |
    |     |    | |.........|      |
    |     |    | |--> BN --|      |
    =======    ====================
    """
    dm1 = NodeManager(useDLM=False)
    dm2 = NodeManager(useDLM=False)

    sessionId = 's1'
    N = 100
    g1 = [{"oid":"A", "type":"plain", "storage": "memory"}]
    g2 = [{"oid":"C", "type":"plain", "storage": "memory"}]
    for i in xrange(N):
        b_oid = "B%d" % (i,)
        # SleepAndCopyApp effectively opens the input drop
        g2.append({"oid":b_oid, "type":"app",
                   "app":"test.graphsRepository.SleepAndCopyApp",
                   "outputs":["C"], "sleepTime": 0})

    uris1 = dm1.quickDeploy(sessionId, g1)
    uris2 = dm2.quickDeploy(sessionId, g2)
    self.assertEquals(1, len(uris1))
    self.assertEquals(1 + N, len(uris2))

    # We externally wire the Proxy objects to establish the inter-DM
    # relationships. Make sure we release the proxies
    with Pyro4.Proxy(uris1['A']) as a:
        for i in xrange(N):
            with Pyro4.Proxy(uris2['B%d' % (i,)]) as b:
                b.addInput(a, False)
                a.addConsumer(b, False)

        # Run! The sole fact that this doesn't throw exceptions is already
        # a good proof that everything is working as expected
        c = Pyro4.Proxy(uris2['C'])
        with droputils.EvtConsumerProxyCtx(self, c, 5):
            a.write('a')
            a.setCompleted()

    dm1.destroySession(sessionId)
    dm2.destroySession(sessionId)
def test_runGraphOneDOPerDOM(self):
    """
    A test that creates three DROPs in two different DMs, wires two of them
    together externally (i.e., using their proxies), and runs the graph.
    For this, the graphs fed into the DMs must *not* express the inter-DM
    relationships. The graph looks like:

    DM #1      DM #2
    =======    =============
    | A --|----|-> B --> C |
    =======    =============
    """
    dm1 = NodeManager(useDLM=False)
    dm2 = NodeManager(useDLM=False)

    sessionId = 's1'
    g1 = [{"oid":"A", "type":"plain", "storage": "memory"}]
    g2 = [{"oid":"B", "type":"app", "app":"dfms.apps.crc.CRCApp"},
          {"oid":"C", "type":"plain", "storage": "memory", "producers":["B"]}]

    uris1 = dm1.quickDeploy(sessionId, g1)
    uris2 = dm2.quickDeploy(sessionId, g2)
    self.assertEquals(1, len(uris1))
    self.assertEquals(2, len(uris2))

    # We externally wire the Proxy objects now
    a = Pyro4.Proxy(uris1['A'])
    b = Pyro4.Proxy(uris2['B'])
    c = Pyro4.Proxy(uris2['C'])
    a.addConsumer(b)

    # Run! We wait until c is completed
    with droputils.EvtConsumerProxyCtx(self, c, 1):
        a.write('a')
        a.setCompleted()

    for drop in a, b, c:
        self.assertEquals(DROPStates.COMPLETED, drop.status)
    # B is a CRC app, so C ends up holding the checksum of A's contents
    self.assertEquals(a.checksum, int(droputils.allDropContents(c)))

    for dropProxy in a, b, c:
        dropProxy._pyroRelease()

    dm1.destroySession(sessionId)
    dm2.destroySession(sessionId)
def test_error_listener(self):

    evt = threading.Event()
    erroneous_drops = []

    class listener(object):
        def on_error(self, drop):
            erroneous_drops.append(drop.uid)
            if len(erroneous_drops) == 2:  # both 'C' and 'B' failed already
                evt.set()

    sessionId = 'lala'
    dm = NodeManager(useDLM=False, error_listener=listener())

    g = [{"oid":"A", "type":"plain", "storage": "memory"},
         {"oid":"B", "type":"app", "app":"test.manager.test_dm.ErroneousApp", "inputs": ["A"]},
         {"oid":"C", "type":"plain", "storage": "memory", "producers":["B"]}]

    dm.createSession(sessionId)
    dm.addGraphSpec(sessionId, g)
    dm.deploySession(sessionId, ["A"])

    self.assertTrue(evt.wait(10), "Didn't receive errors on time")
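# For reference, a plausible sketch of the ErroneousApp referenced in the graph
# above; the real implementation lives in test.manager.test_dm and is not shown
# in this excerpt. Assuming it follows the usual dfms app pattern (a
# BarrierAppDROP subclass whose run() raises), B moves to an error state when
# executed and the failure propagates to its output C, which is what the
# listener above counts on.
from dfms.drop import BarrierAppDROP  # assumed import path

class ErroneousApp(BarrierAppDROP):
    def run(self):
        raise Exception("Sorry, we always fail")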
class TestRest(unittest.TestCase):

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.dm = NodeManager(False)
        self._dm_server = NMRestServer(self.dm)
        self._dm_t = threading.Thread(target=self._dm_server.start,
                                      args=(hostname, constants.NODE_DEFAULT_REST_PORT))
        self._dm_t.start()

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self._dm_server.stop()
        self._dm_t.join()
        self.dm.shutdown()
        self.assertFalse(self._dm_t.isAlive())

    def test_errtype(self):

        sid = 'lala'
        c = NodeManagerClient(hostname)
        c.createSession(sid)

        # already exists
        self.assertRaises(exceptions.SessionAlreadyExistsException, c.createSession, sid)

        # different session
        self.assertRaises(exceptions.NoSessionException, c.addGraphSpec, sid + "x", [{}])

        # invalid dropspec, it has no oid/type (it is actually completely empty)
        self.assertRaises(exceptions.InvalidGraphException, c.addGraphSpec, sid, [{}])

        # invalid state, the graph status can only be queried when the session is running
        self.assertRaises(exceptions.InvalidSessionState, c.getGraphStatus, sid)

        # valid dropspec, but the socket listener app doesn't allow inputs
        c.addGraphSpec(sid, [{'type': 'socket', 'oid': 'a', 'inputs': ['b']},
                             {'oid': 'b', 'type': 'plain', 'storage': 'memory'}])
        self.assertRaises(exceptions.InvalidRelationshipException, c.deploySession, sid)

        # And here we point to a nonexistent file, making an invalid drop
        c.destroySession(sid)
        c.createSession(sid)
        fname = tempfile.mktemp()
        c.addGraphSpec(sid, [{'type': 'plain', 'storage': 'file', 'oid': 'a',
                              'filepath': fname, 'check_filepath_exists': True}])
        self.assertRaises(exceptions.InvalidDropException, c.deploySession, sid)
class TestRest(unittest.TestCase):

    def setUp(self):
        unittest.TestCase.setUp(self)

        self.dm = NodeManager(False)
        self._dm_server = NMRestServer(self.dm)
        self._dm_t = threading.Thread(target=self._dm_server.start,
                                      args=(hostname, constants.NODE_DEFAULT_REST_PORT))
        self._dm_t.start()

        self.dim = DataIslandManager(dmHosts=[hostname])
        self._dim_server = CompositeManagerRestServer(self.dim)
        self._dim_t = threading.Thread(target=self._dim_server.start,
                                       args=(hostname, constants.ISLAND_DEFAULT_REST_PORT))
        self._dim_t.start()

    def tearDown(self):
        unittest.TestCase.tearDown(self)

        self._dm_server.stop()
        self._dm_t.join()
        self.dm.shutdown()
        self.assertFalse(self._dm_t.isAlive())

        self._dim_server.stop()
        self._dim_t.join()
        self.dim.shutdown()
        self.assertFalse(self._dim_t.isAlive())

    def test_index(self):
        # Just check that the HTML pages load properly
        with RestClient(hostname, constants.NODE_DEFAULT_REST_PORT, 10) as c:
            c._GET('/')
            c._GET('/session')

    def test_errtype(self):

        sid = 'lala'
        c = NodeManagerClient(hostname)
        c.createSession(sid)

        # already exists
        self.assertRaises(exceptions.SessionAlreadyExistsException, c.createSession, sid)

        # different session
        self.assertRaises(exceptions.NoSessionException, c.addGraphSpec, sid + "x", [{}])

        # invalid dropspec, it has no oid/type (it is actually completely empty)
        self.assertRaises(exceptions.InvalidGraphException, c.addGraphSpec, sid, [{}])

        # invalid dropspec, the app doesn't exist
        self.assertRaises(exceptions.InvalidGraphException, c.addGraphSpec, sid,
                          [{'oid': 'a', 'type': 'app', 'app': 'doesnt.exist'}])

        # invalid state, the graph status can only be queried when the session is running
        self.assertRaises(exceptions.InvalidSessionState, c.getGraphStatus, sid)

        # valid dropspec, but the socket listener app doesn't allow inputs
        c.addGraphSpec(sid, [{'type': 'socket', 'oid': 'a', 'inputs': ['b']},
                             {'oid': 'b', 'type': 'plain', 'storage': 'memory'}])
        self.assertRaises(exceptions.InvalidRelationshipException, c.deploySession, sid)

        # And here we point to a nonexistent file, making an invalid drop
        c.destroySession(sid)
        c.createSession(sid)
        fname = tempfile.mktemp()
        c.addGraphSpec(sid, [{'type': 'plain', 'storage': 'file', 'oid': 'a',
                              'filepath': fname, 'check_filepath_exists': True}])
        self.assertRaises(exceptions.InvalidDropException, c.deploySession, sid)

    def test_recursive(self):

        sid = 'lala'
        c = DataIslandManagerClient(hostname)
        c.createSession(sid)

        # invalid dropspec, the app doesn't exist.
        # This is not checked at the DIM level but only at the NM level;
        # the exception should still pass through though
        with self.assertRaises(exceptions.SubManagerException) as cm:
            c.addGraphSpec(sid, [{'oid': 'a', 'type': 'app',
                                  'app': 'doesnt.exist', 'node': hostname}])
        ex = cm.exception
        self.assertTrue(hostname in ex.args[0])
        self.assertTrue(isinstance(ex.args[0][hostname], InvalidGraphException))
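# A hypothetical happy-path counterpart to test_errtype (not part of the
# original suite): the same client calls exercised above, but with a
# well-formed single-drop graph, should go through without raising any of the
# exceptions tested there. The method name and session id 'ok' are arbitrary.
def _sketch_valid_deployment(self):
    c = NodeManagerClient(hostname)
    c.createSession('ok')
    c.addGraphSpec('ok', [{'oid': 'a', 'type': 'plain', 'storage': 'memory'}])
    c.deploySession('ok')  # no exception expected for a valid graph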
def test_runWithFourDMs(self):
    """
    A test that creates several DROPs spread across four different DMs and
    runs the graph. The graph looks like this:

                         DM #2
                 +--------------------------+
                 |        |--> C --|        |
             +---|--> B --|--> D --|--> F --|--|
             |   |        |--> E --|        |  |
    DM #1    |   +--------------------------+  |  DM #4
    +-----+  |                                 |  +---------------------+
    |     |  |                                 |--|--> L --|            |
    | A --|--+                                    |        |--> N --> O |
    |     |  |                                 |--|--> M --|            |
    +-----+  |   DM #3                         |  +---------------------+
             |   +--------------------------+  |
             |   |        |--> H --|        |  |
             +---|--> G --|--> I --|--> K --|--|
                 |        |--> J --|        |
                 +--------------------------+

    B, F, G, K and N are AppDOs; the rest are plain in-memory DROPs.
    """
    dm1 = NodeManager(useDLM=False)
    dm2 = NodeManager(useDLM=False)
    dm3 = NodeManager(useDLM=False)
    dm4 = NodeManager(useDLM=False)

    sessionId = 's1'
    g1 = [memory('A', expectedSize=1)]
    g2 = [sleepAndCopy('B', outputs=['C','D','E'], sleepTime=0),
          memory('C'),
          memory('D'),
          memory('E'),
          sleepAndCopy('F', inputs=['C','D','E'], sleepTime=0)]
    g3 = [sleepAndCopy('G', outputs=['H','I','J'], sleepTime=0),
          memory('H'),
          memory('I'),
          memory('J'),
          sleepAndCopy('K', inputs=['H','I','J'], sleepTime=0)]
    g4 = [memory('L'),
          memory('M'),
          sleepAndCopy('N', inputs=['L','M'], outputs=['O'], sleepTime=0),
          memory('O')]

    uris1 = dm1.quickDeploy(sessionId, g1)
    uris2 = dm2.quickDeploy(sessionId, g2)
    uris3 = dm3.quickDeploy(sessionId, g3)
    uris4 = dm4.quickDeploy(sessionId, g4)
    self.assertEquals(1, len(uris1))
    self.assertEquals(5, len(uris2))
    self.assertEquals(5, len(uris3))
    self.assertEquals(4, len(uris4))

    allUris = {}
    allUris.update(uris1)
    allUris.update(uris2)
    allUris.update(uris3)
    allUris.update(uris4)

    # We externally wire the Proxy objects to establish the inter-DM
    # relationships. Intra-DM relationships are already established
    proxies = {}
    for uid, uri in allUris.viewitems():
        proxies[uid] = Pyro4.Proxy(uri)

    a = proxies['A']
    b = proxies['B']
    f = proxies['F']
    g = proxies['G']
    k = proxies['K']
    l = proxies['L']
    m = proxies['M']
    o = proxies['O']

    a.addConsumer(b)
    a.addConsumer(g)
    f.addOutput(l)
    k.addOutput(m)

    # Run! This should trigger the full execution of the graph.
    # A was created with expectedSize=1, so writing a single byte completes it.
    with droputils.EvtConsumerProxyCtx(self, o, 1):
        a.write('a')

    for dropProxy in proxies.viewvalues():
        self.assertEquals(DROPStates.COMPLETED, dropProxy.status,
                          "Status of '%s' is not COMPLETED: %d" % (dropProxy.uid, dropProxy.status))
        dropProxy._pyroRelease()

    for dm in [dm1, dm2, dm3, dm4]:
        dm.destroySession(sessionId)
def test_runGraphSeveralDropsPerDM(self):
    """
    A test that creates several DROPs in two different DMs and runs the
    graph. The graph looks like this:

           DM #1                  DM #2
    ===================    ================
    | A --> C --> D --|----|-|            |
    |                 |    | |--> E --> F |
    | B --------------|----|-|            |
    ===================    ================

    :see: `self.test_runGraphSingleDOPerDOM`
    """
    dm1 = NodeManager(useDLM=False)
    dm2 = NodeManager(useDLM=False)

    sessionId = 's1'
    g1 = [{"oid":"A", "type":"plain", "storage": "memory", "consumers":["C"]},
          {"oid":"B", "type":"plain", "storage": "memory"},
          {"oid":"C", "type":"app", "app":"dfms.apps.crc.CRCApp"},
          {"oid":"D", "type":"plain", "storage": "memory", "producers": ["C"]}]
    g2 = [{"oid":"E", "type":"app", "app":"test.test_drop.SumupContainerChecksum"},
          {"oid":"F", "type":"plain", "storage": "memory", "producers":["E"]}]

    uris1 = dm1.quickDeploy(sessionId, g1)
    uris2 = dm2.quickDeploy(sessionId, g2)
    self.assertEquals(4, len(uris1))
    self.assertEquals(2, len(uris2))

    # We externally wire the Proxy objects to establish the inter-DM
    # relationships
    a = Pyro4.Proxy(uris1['A'])
    b = Pyro4.Proxy(uris1['B'])
    c = Pyro4.Proxy(uris1['C'])
    d = Pyro4.Proxy(uris1['D'])
    e = Pyro4.Proxy(uris2['E'])
    f = Pyro4.Proxy(uris2['F'])
    for drop, uid in [(a,'A'), (b,'B'), (c,'C'), (d,'D'), (e,'E'), (f,'F')]:
        self.assertEquals(uid, drop.uid,
                          "Proxy is not the DROP we think it should be (assumed: %s / actual: %s)" % (uid, drop.uid))
    e.addInput(d)
    e.addInput(b)

    # Run! The sole fact that this doesn't throw exceptions is already
    # a good proof that everything is working as expected
    with droputils.EvtConsumerProxyCtx(self, f, 5):
        a.write('a')
        a.setCompleted()
        b.write('a')
        b.setCompleted()

    for drop in a, b, c, d, e, f:
        self.assertEquals(DROPStates.COMPLETED, drop.status,
                          "DROP %s is not COMPLETED" % (drop.uid))

    # C is a CRC app, so D holds A's checksum; E sums up its inputs' checksums
    self.assertEquals(a.checksum, int(droputils.allDropContents(d)))
    self.assertEquals(b.checksum + d.checksum, int(droputils.allDropContents(f)))

    for dropProxy in a, b, c, d, e, f:
        dropProxy._pyroRelease()

    dm1.destroySession(sessionId)
    dm2.destroySession(sessionId)