def test_basic_run(self):
    """Run the signal generator into the averager sink and wait for the output MS file."""
    # Generator side: most knobs are taken from the environment with sane defaults.
    signal = SignalGenerateAndAverageDrop(
        '1', '1',
        internal_port=internal_port,
        stream_port=stream_port,
        start_freq=int(os.environ.get('START_FREQ', 45991200)),
        freq_step=int(os.environ.get('FREQ_STEP', 6400)),
        use_gpus=int(os.environ.get('USE_GPUS', 0)),
        num_freq_steps=int(os.environ.get('NUM_CHANNELS', 1)),
        telescope_model_path='./conf/%s.tm' % tm,
        sky_model_file_path="./conf/eor_model_list.csv",
        num_time_steps=int(os.environ.get('NUM_TIME_STEPS', 1)))

    # Sink side, listening on the stream port and excluding baselines per config.
    sink = AveragerSinkDrop(
        '2', '2',
        stream_listen_port_start=stream_port,
        use_adios2=int(os.environ.get('USE_ADIOS2', 0)),
        baseline_exclusion_map_path='./conf/%s_baselines.csv' % tm,
        node='127.0.0.1')

    # Wire generator -> in-memory drop -> (streaming) sink -> output file.
    drop = InMemoryDROP('3', '3')
    drop.addStreamingConsumer(sink)
    signal.addOutput(drop)
    ms = FileDROP('4', '4', filepath=output)
    sink.addOutput(ms)

    # Kick off the generator and wait (generously) for the MS file to complete.
    with droputils.DROPWaiterCtx(self, ms, 1000):
        signal.async_execute()
def test_namedPorts_with_kwonlyargs(self):
    """
    Use a graph with named ports and check whether it is runnning
    """
    # OIDs of the first drops in the graph
    init_oids = [
        "2022-03-30T03:46:01_-2_0",
        "2022-03-30T03:46:01_-6_0",
    ]
    sessionId = "lalo"
    with pkg_resources.resource_stream(
        "test", "graphs/pyfunc_glob_testPG.graph"
    ) as f:  # @UndefinedVariable
        graphSpec = json.load(f)
    # dropSpecs = graph_loader.loadDropSpecs(graphSpec)
    self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

    # Deploy now and get OIDs
    self.dim.deploySession(sessionId)
    fd = self.dm._sessions[sessionId].drops["2022-03-30T03:46:01_-1_0"]

    # Feed each initial drop of the graph from its own in-memory drop.
    start_drops = [InMemoryDROP(x, x) for x in ("a", "b")]
    for start_drop, oid in zip(start_drops, init_oids):
        init_drop = self.dm._sessions[sessionId].drops[oid]
        init_drop.addInput(start_drop)

    logger.debug(f"PyfuncAPPDrop: {dir(fd)}")
    for i in fd.parameters["inputs"]:
        logger.debug(f"PyfuncAPPDrop input names:{i}")

    # Completing the feeder drops triggers execution; wait on the last init drop.
    with droputils.DROPWaiterCtx(self, init_drop, 3):
        for start_drop in start_drops:
            start_drop.setCompleted()
def test_ddGraph(self):
    """
    Graph is using dd to read a file and write to another. This is mainly
    to test that the separatorString parameter is working correctly.
    """
    sessionId = "lalo"
    ddGraph = "graphs/ddTest.graph"
    with pkg_resources.resource_stream("test", ddGraph) as f:  # @UndefinedVariable
        logger.debug(f"Loading graph: {f}")
        graphSpec = json.load(f)
    self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

    # Deploy now and get OIDs; data size mirrors dd's bs*count arguments
    bs = graphSpec[0]["applicationArgs"]["bs"]["value"]
    count = graphSpec[0]["applicationArgs"]["count"]["value"]
    self.dim.deploySession(sessionId)
    a, c = [
        self.dm._sessions[sessionId].drops[x]
        for x in ("2022-02-11T08:05:47_-5_0", "2022-02-11T08:05:47_-3_0")
    ]

    data = os.urandom(bs * count)
    logger.debug(f"Length of data produced: {len(data)}")
    with droputils.DROPWaiterCtx(self, c, 3):
        a.write(data)
        a.setCompleted()
    self.assertEqual(data, droputils.allDropContents(c))
def test_sessionStatus(self):
    """Session status must agree across MM, DIM and NM at every lifecycle stage."""

    def assertSessionStatus(sessionId, status):
        # Collect the status as seen by each manager layer...
        sessionStatusMM = self.mm.getSessionStatus(sessionId)
        sessionStatusDIM = self.dim.getSessionStatus(sessionId)
        sessionStatusNM = self.nm.getSessionStatus(sessionId)
        # ...and check they are all consistent with each other and the expectation.
        self.assertEqual(1, len(sessionStatusMM))
        self.assertIn(hostname, sessionStatusMM)
        self.assertDictEqual(sessionStatusDIM, sessionStatusMM[hostname])
        self.assertEqual(sessionStatusNM, sessionStatusMM[hostname][hostname])
        self.assertEqual(sessionStatusNM, status)

    sessionId = "lala"
    self.mm.createSession(sessionId)
    assertSessionStatus(sessionId, SessionStates.PRISTINE)

    sessionId = "lalo"
    self.createSessionAndAddTypicalGraph(sessionId)
    assertSessionStatus(sessionId, SessionStates.BUILDING)

    self.nm.deploySession(sessionId)
    assertSessionStatus(sessionId, SessionStates.RUNNING)

    a, c = [self.nm._sessions[sessionId].drops[x] for x in ("A", "C")]
    data = os.urandom(10)
    with droputils.DROPWaiterCtx(self, c, 3):
        a.write(data)
        a.setCompleted()
    assertSessionStatus(sessionId, SessionStates.FINISHED)
def test_plasma(self):
    """Round-trip a MeasurementSet through plasma and verify the copy matches.

    Extracts a reference MS, writes it into a plasma store via MSPlasmaWriter,
    reads it back out via MSPlasmaReader, and compares the resulting file with
    the original. Finally checks that the plasma object ID embedded in the
    drop's dataURL can be turned back into a retrievable ObjectID.
    """
    in_file = '/tmp/test.ms'
    out_file = '/tmp/copy.ms'
    with tarfile.open('./data/test_ms.tar.gz', 'r') as ref:
        ref.extractall('/tmp/')

    # file -> plasma writer -> plasma drop -> plasma reader -> file
    a = FileDROP('a', 'a', filepath=in_file)
    b = MSPlasmaWriter('b', 'b')
    c = PlasmaDROP('c', 'c')
    d = MSPlasmaReader('d', 'd')
    e = FileDROP('e', 'e', filepath=out_file)
    b.addInput(a)
    b.addOutput(c)
    d.addInput(c)
    d.addOutput(e)

    # Check the MS DATA content is the same as original
    with droputils.DROPWaiterCtx(self, e, 5):
        a.setCompleted()
    self.compare_ms(in_file, out_file)

    # check we can go from dataURL to plasma ID
    client = plasma.connect("/tmp/plasma")
    # BUG FIX: str.decode("hex") is Python 2 only and raises AttributeError on
    # Python 3; bytes.fromhex() is the equivalent way to decode the hex object
    # ID (the part of the dataURL after '//') back into raw bytes.
    object_id_hex = c.dataURL.split('//')[1]
    client.get(plasma.ObjectID(bytes.fromhex(object_id_hex)))
def _do_test(func, expected_out, *args, **kwargs):
    """Wire *args/**kwargs as pickled input drops into a _PyFuncApp and
    check that the app's (unpickled) output equals ``expected_out``.

    NOTE(review): ``self`` is resolved from the enclosing scope — confirm
    this helper stays nested inside a test method.
    """
    # Positional inputs: list of drops; keyword inputs: name -> (uid, drop)
    arg_inputs = []
    kwarg_inputs = {}

    def translate(value):
        return base64.b64encode(pickle.dumps(value))

    # uids are assigned sequentially across positional and keyword inputs
    uid_counter = 0
    for arg in args:
        uid = 'uid_%d' % uid_counter
        arg_inputs.append(InMemoryDROP(uid, uid, pydata=translate(arg)))
        uid_counter += 1
    for name, value in kwargs.items():
        uid = 'uid_%d' % uid_counter
        kwarg_inputs[name] = (uid, InMemoryDROP(uid, uid, pydata=translate(value)))
        uid_counter += 1

    a = InMemoryDROP('a', 'a', pydata=translate(1))
    output = InMemoryDROP('o', 'o')

    # Map each keyword argument name to the uid of the drop carrying its value
    app = _PyFuncApp(
        'f', 'f', func,
        func_arg_mapping={name: vals[0] for name, vals in kwarg_inputs.items()})
    app.addInput(a)
    app.addOutput(output)
    kw_drops = [x[1] for x in kwarg_inputs.values()]
    for drop in arg_inputs + kw_drops:
        app.addInput(drop)

    # Completing every input triggers the app; wait for the output drop
    with droputils.DROPWaiterCtx(self, output):
        a.setCompleted()
        for drop in arg_inputs + kw_drops:
            drop.setCompleted()

    self.assertEqual(
        expected_out,
        pickle.loads(droputils.allDropContents(output)))  # @UndefinedVariable
def test_namedPorts(self):
    """
    Use a graph with named ports and check whether it is runnning
    """
    init_oid = "2022-03-20T04:33:27_-2_0"  # first drop in graph
    sessionId = "lalo"
    with pkg_resources.resource_stream(
        "test", "graphs/funcTestPG_namedPorts.graph"
    ) as f:  # @UndefinedVariable
        graphSpec = json.load(f)
    # dropSpecs = graph_loader.loadDropSpecs(graphSpec)
    self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

    # Deploy now and get OIDs
    self.dim.deploySession(sessionId)
    fd = self.dm._sessions[sessionId].drops["2022-03-20T04:33:27_-1_0"]
    init_drop = self.dm._sessions[sessionId].drops[init_oid]

    # Feed the graph's first drop from a fresh in-memory drop
    a = InMemoryDROP("a", "a")
    init_drop.addInput(a)

    logger.debug(f"PyfuncAPPDrop: {dir(fd)}")
    for i in fd.parameters["inputs"]:
        logger.debug(f"PyfuncAPPDrop input names:{i}")

    with droputils.DROPWaiterCtx(self, init_drop, 3):
        a.setCompleted()
def test_sessionStatus(self):
    """Session status must agree between DIM and DM through the whole lifecycle."""

    def assertSessionStatus(sessionId, status):
        sessionStatus = self.dim.getSessionStatus(sessionId)
        self.assertEqual(1, len(sessionStatus))
        self.assertIn(hostname, sessionStatus)
        self.assertEqual(status, sessionStatus[hostname])
        self.assertEqual(status, self.dm.getSessionStatus(sessionId))

    sessionId = 'lala'
    self.dim.createSession(sessionId)
    assertSessionStatus(sessionId, SessionStates.PRISTINE)

    sessionId = 'lalo'
    self.createSessionAndAddTypicalGraph(sessionId)
    assertSessionStatus(sessionId, SessionStates.BUILDING)

    self.dm.deploySession(sessionId)
    assertSessionStatus(sessionId, SessionStates.RUNNING)

    a, c = [self.dm._sessions[sessionId].drops[x] for x in ('A', 'C')]
    data = os.urandom(10)
    with droputils.DROPWaiterCtx(self, c, 3):
        a.write(data)
        a.setCompleted()
    assertSessionStatus(sessionId, SessionStates.FINISHED)
def _test_graph_runs(self, drops, first, last, timeout=1):
    """Complete the root drop(s) and assert that every drop finished COMPLETED."""
    # Accept either a single drop or a collection of root drops
    first = droputils.listify(first)
    with droputils.DROPWaiterCtx(self, last, timeout):
        for root in first:
            root.setCompleted()
    for drop in drops:
        self.assertEqual(DROPStates.COMPLETED, drop.status)
def test_many_relationships(self):
    """
    A test in which a drop is related to many other drops that live in a
    separate DM.

    Drop A (on DM #1) is accessed by many applications B1..BN (on DM #2),
    which should not exhaust resources on DM #1. We collapse all of them
    into C so we can monitor only its status to know that the execution is
    over.
    """
    dm1, dm2 = [self._start_dm() for _ in range(2)]

    sessionId = "s1"
    N = 100
    g1 = [{"oid": "A", "type": "plain", "storage": Categories.MEMORY}]
    g2 = [{"oid": "C", "type": "plain", "storage": Categories.MEMORY}]
    rels = []
    for i in range(N):
        b_oid = "B%d" % (i,)
        # SleepAndCopyApp effectively opens the input drop
        g2.append({
            "oid": b_oid,
            "type": "app",
            "app": "dlg.apps.simple.SleepAndCopyApp",
            "outputs": ["C"],
            "sleepTime": 0,
        })
        rels.append(DROPRel("A", DROPLinkType.INPUT, b_oid))

    add_test_reprodata(g1)
    add_test_reprodata(g2)
    quickDeploy(dm1, sessionId, g1, {nm_conninfo(1): rels})
    quickDeploy(dm2, sessionId, g2, {nm_conninfo(0): rels})
    self.assertEqual(1, len(dm1._sessions[sessionId].drops))
    self.assertEqual(1 + N, len(dm2._sessions[sessionId].drops))

    # Run! The sole fact that this doesn't throw exceptions is already
    # a good proof that everything is working as expected
    a = dm1._sessions[sessionId].drops["A"]
    c = dm2._sessions[sessionId].drops["C"]
    with droputils.DROPWaiterCtx(self, c, 10):
        a.write(b"a")
        a.setCompleted()

    for i in range(N):
        drop = dm2._sessions[sessionId].drops["B%d" % (i,)]
        self.assertEqual(DROPStates.COMPLETED, drop.status)

    dm1.destroySession(sessionId)
    dm2.destroySession(sessionId)
def _test_runGraphInTwoNMs(
    self,
    g1,
    g2,
    rels,
    root_data,
    leaf_data,
    root_oids=("A",),
    leaf_oid="C",
    expected_failures=[],
    sessionId=f"s{random.randint(0, 1000)}",
    node_managers=None,
    threads=0,
):
    """Utility to run a graph in two Node Managers"""
    # NOTE(review): the sessionId default is evaluated once at import time, so
    # every call without an explicit sessionId shares the same id — confirm
    # that is intended (sessions are destroyed at the end of each call).
    dm1, dm2 = node_managers or [self._start_dm(threads=threads) for _ in range(2)]
    add_test_reprodata(g1)
    add_test_reprodata(g2)
    quickDeploy(dm1, sessionId, g1, {nm_conninfo(1): rels})
    quickDeploy(dm2, sessionId, g2, {nm_conninfo(0): rels})
    self.assertEqual(len(g1), len(dm1._sessions[sessionId].drops))
    self.assertEqual(len(g2), len(dm2._sessions[sessionId].drops))

    # Run! We wait until the leaf drop is completed
    drops = {}
    drops.update(dm1._sessions[sessionId].drops)
    drops.update(dm2._sessions[sessionId].drops)
    leaf_drop = drops[leaf_oid]
    with droputils.DROPWaiterCtx(self, leaf_drop, 2):
        for oid in root_oids:
            root = drops[oid]
            root.write(root_data)
            root.setCompleted()

    # Partition the drops into those expected to succeed and those to fail
    expected_successes = [
        drops[oid] for oid in drops if oid not in expected_failures
    ]
    expected_failures = [drops[oid] for oid in drops if oid in expected_failures]
    for drop in expected_successes:
        self.assertEqual(DROPStates.COMPLETED, drop.status)
    for drop in expected_failures:
        self.assertEqual(DROPStates.ERROR, drop.status)

    leaf_drop_data = None
    if leaf_drop not in expected_failures:
        leaf_drop_data = droputils.allDropContents(leaf_drop)
        if leaf_data is not None:
            self.assertEqual(len(leaf_data), len(leaf_drop_data))
            self.assertEqual(leaf_data, leaf_drop_data)

    sleep(0.1)  # just make sure all events have been processed.
    dm1.destroySession(sessionId)
    dm2.destroySession(sessionId)
    return leaf_drop_data
def _test_deployGraphWithCompletedDOs(self, sessionId):
    """Deploy marking 'A' completed up-front and check 'C' finishes on its own."""
    self.createSessionAndAddTypicalGraph(sessionId, sleepTime=1)

    # Deploy now and get C
    self.dim.deploySession(sessionId, completedDrops=['A'])
    c = self.dm._sessions[sessionId].drops['C']

    # This should be happening before the sleepTime expires
    with droputils.DROPWaiterCtx(self, c, 2):
        pass

    self.assertEqual(DROPStates.COMPLETED, c.status)
def test_cancel_dynlibprocapp(self):
    """Checks that we can cancel a long-running dynlib proc app"""
    a = DynlibProcApp("a", "a", lib=_libpath, sleep_seconds=10)
    # Fire-and-forget: the zero-timeout waiter just gives us an event context
    with droputils.DROPWaiterCtx(self, (), timeout=0):
        a.async_execute()

    # Give the app time to actually start sleeping, then cancel it
    time.sleep(1)
    t0 = time.time()
    a.cancel()
    self.assertLess(
        time.time() - t0, 1, "Cancelled dynlibprocapp in less than a second"
    )
    self.assertEqual(DROPStates.CANCELLED, a.status)
def test_runGraphSeveralDropsPerDM(self):
    """
    A test that creates several DROPs in two different DMs and runs the
    graph.

    DM #1 hosts A, B, C and D: A feeds the CRC app C, which produces D.
    DM #2 hosts E and F: the checksum-summing app E consumes D and B (via
    inter-DM relationships) and produces F.

    :see: `self.test_runGraphSingleDOPerDOM`
    """
    dm1, dm2 = [self._start_dm() for _ in range(2)]

    sessionId = 's1'
    g1 = [{"oid": "A", "type": "plain", "storage": "memory", "consumers": ["C"]},
          {"oid": "B", "type": "plain", "storage": "memory"},
          {"oid": "C", "type": "app", "app": "dlg.apps.crc.CRCApp"},
          {"oid": "D", "type": "plain", "storage": "memory", "producers": ["C"]}]
    g2 = [{"oid": "E", "type": "app", "app": "test.test_drop.SumupContainerChecksum"},
          {"oid": "F", "type": "plain", "storage": "memory", "producers": ["E"]}]
    rels = [DROPRel('D', DROPLinkType.INPUT, 'E'),
            DROPRel('B', DROPLinkType.INPUT, 'E')]
    quickDeploy(dm1, sessionId, g1, {nm_conninfo(1): rels})
    quickDeploy(dm2, sessionId, g2, {nm_conninfo(0): rels})
    self.assertEqual(4, len(dm1._sessions[sessionId].drops))
    self.assertEqual(2, len(dm2._sessions[sessionId].drops))

    # Run! The sole fact that this doesn't throw exceptions is already
    # a good proof that everything is working as expected
    a, b, c, d = [dm1._sessions[sessionId].drops[x] for x in ('A', 'B', 'C', 'D')]
    e, f = [dm2._sessions[sessionId].drops[x] for x in ('E', 'F')]
    with droputils.DROPWaiterCtx(self, f, 5):
        a.write(b'a')
        a.setCompleted()
        b.write(b'a')
        b.setCompleted()

    for drop in a, b, c, d, e, f:
        self.assertEqual(DROPStates.COMPLETED, drop.status,
                         "DROP %s is not COMPLETED" % (drop.uid))

    # D carries A's checksum; F carries the sum of B's and D's checksums
    self.assertEqual(a.checksum, int(droputils.allDropContents(d)))
    self.assertEqual(b.checksum + d.checksum, int(droputils.allDropContents(f)))

    dm1.destroySession(sessionId)
    dm2.destroySession(sessionId)
def _test_simple_copy(self, streaming):
    """
    Checks that the following graph works, both in streaming and batch mode:

            /--> B --> {C, D}
        A --|
            \\--> E --> {F, G}

    Both B and E use the same dynamically loaded library and work with their
    input A to copy the inputs into their outputs.
    """
    # Build the graph; drop/app classes depend on the streaming flag
    a = (NullDROP if streaming else InMemoryDROP)("a", "a")
    b, e = (
        (DynlibStreamApp if streaming else DynlibApp)(
            x, x, lib=_libpath, print_stats=print_stats, bufsize=bufsize
        )
        for x in ("b", "e")
    )
    c, d, f, g = (InMemoryDROP(x, x) for x in ("c", "d", "f", "g"))
    for app, outputs in (b, (c, d)), (e, (f, g)):
        (app.addStreamingInput if streaming else app.addInput)(a)
        for o in outputs:
            app.addOutput(o)

    # ~100 MBs of data should be copied over from a to c and d via b, etc
    data = os.urandom(1024 * 1024) * 100
    reader = six.BytesIO(data)
    with droputils.DROPWaiterCtx(self, (c, d, f, g), 10):
        if streaming:
            # Write the data in chunks so we actually exercise multiple calls
            # to the data_written library call
            for datum in iter(functools.partial(reader.read, 1024 * 1024), b""):
                a.write(datum)
        else:
            a.write(data)
        a.setCompleted()

    # Every leaf drop must hold an exact copy of the input data
    for drop in (c, d, f, g):
        drop_data = droputils.allDropContents(drop)
        self.assertEqual(
            len(data),
            len(drop_data),
            "Data from %r is not what we wanted :(" % (drop,),
        )
        self.assertEqual(data, drop_data)
def test_deployGraph(self):
    """Deploy the typical graph and check data flows from A through to C."""
    sessionId = 'lalo'
    self.createSessionAndAddTypicalGraph(sessionId)

    # Deploy now and get A and C
    self.dim.deploySession(sessionId)
    a, c = [self.dm._sessions[sessionId].drops[x] for x in ('A', 'C')]

    data = os.urandom(10)
    with droputils.DROPWaiterCtx(self, c, 3):
        a.write(data)
        a.setCompleted()

    self.assertEqual(data, droputils.allDropContents(c))
def test_agg_and_rep(self):
    """Aggregate four FITS images into one, then replicate the result."""
    # aggregate: four restored EOR images feed the aggregator
    agg = FitsImageAggregator('0', '0', freq_step=1000.0)
    input_files = [
        FileDROP(uid, uid,
                 filepath='image_eor%02d.restored.fits' % idx,
                 dirname='/tmp/output/')
        for idx, uid in ((1, '1'), (2, '2'), (3, '3'), (4, '4'))
    ]
    for input_file in input_files:
        agg.addInput(input_file)
    output = FileDROP('10', '10', filepath='summit.fits', dirname='/tmp/output/')
    agg.addOutput(output)

    # replicate: the aggregated image is copied four times
    rep = FitsImageReplicator('11', '11', copies=4)
    rep_output = FileDROP('12', '12', filepath='summit_replication.fits',
                          dirname='/tmp/output/')
    rep.addInput(output)
    rep.addOutput(rep_output)

    # Completing all inputs drives the whole pipeline
    with droputils.DROPWaiterCtx(self, rep, 1000):
        for input_file in input_files:
            input_file.setCompleted()
def test_with_dynlib(self):
    """
    We test the following graph:

    A -----> B ----> C ---> D ---> E
    |        |
    |        +-----> F ---> G ---> H
    |
    +--------------> I ---> J

    A and C are FileDrops; B is a DynlibApp; D, G and I are CRCApps; F, E, H
    and J are InMemoryDrops. The DynlibApp B copies A into C and F; therefore
    D, G and I should yield the same results, meaning that E, H and J should
    have the same contents. Similarly, A, C and F should have the same
    contents.

    This graph was experiencing some problems in a MacOS machine. Hopefully
    this test will shed some light on that issue and allow us to track it
    down and fix it.
    """
    # Build drops and wire them together
    a, c = (FileDROP(x, x) for x in ('a', 'c'))
    b = DynlibApp('b', 'b', lib=test_dynlib._libpath)
    d, g, i = (CRCApp(x, x) for x in ('d', 'g', 'i'))
    f, e, h, j = (InMemoryDROP(x, x) for x in ('f', 'e', 'h', 'j'))
    for data, app in (a, b), (c, d), (f, g), (a, i):
        app.addInput(data)
    for app, data in (b, c), (b, f), (d, e), (g, h), (i, j):
        app.addOutput(data)

    # The crc32 is the same used by the CRCApp, see the imports
    data = os.urandom(1024)
    crc = six.b(str(crc32(data)))

    # Execute the graph and check results
    with droputils.DROPWaiterCtx(self, (e, h, j), 5):
        a.write(data)
        a.setCompleted()

    # Data and CRCs are the expected ones
    for what, who in (data, (a, c, f)), (crc, (e, h, j)):
        for drop in who:
            self.assertEqual(what, droputils.allDropContents(drop))
def test_getGraphStatus(self):
    """Graph status reported by DIM and DM must match at each execution stage."""

    def assertGraphStatus(sessionId, expectedStatus):
        graphStatusByDim = self.dim.getGraphStatus(sessionId)
        graphStatusByDM = self.dm.getGraphStatus(sessionId)
        self.assertDictEqual(graphStatusByDim, graphStatusByDM)
        for dropStatus in graphStatusByDim.values():
            self.assertEqual(expectedStatus, dropStatus['status'])

    sessionId = 'lala'
    self.createSessionAndAddTypicalGraph(sessionId)
    self.dim.deploySession(sessionId)
    assertGraphStatus(sessionId, DROPStates.INITIALIZED)

    a, c = [self.dm._sessions[sessionId].drops[x] for x in ('A', 'C')]
    data = os.urandom(10)
    with droputils.DROPWaiterCtx(self, c, 3):
        a.write(data)
        a.setCompleted()
    assertGraphStatus(sessionId, DROPStates.COMPLETED)
def _test_runGraphInTwoNMs(self, g1, g2, rels, root_data, leaf_data,
                           root_oids=('A',), leaf_oid='C',
                           expected_failures=[]):
    """Utility to run a graph in two Node Managers"""
    dm1, dm2 = [self._start_dm() for _ in range(2)]

    sessionId = 's1'
    quickDeploy(dm1, sessionId, g1, {nm_conninfo(1): rels})
    quickDeploy(dm2, sessionId, g2, {nm_conninfo(0): rels})
    self.assertEqual(len(g1), len(dm1._sessions[sessionId].drops))
    self.assertEqual(len(g2), len(dm2._sessions[sessionId].drops))

    # Run! We wait until the leaf drop is completed
    drops = {}
    drops.update(dm1._sessions[sessionId].drops)
    drops.update(dm2._sessions[sessionId].drops)
    leaf_drop = drops[leaf_oid]
    with droputils.DROPWaiterCtx(self, leaf_drop, 1):
        for oid in root_oids:
            root = drops[oid]
            root.write(root_data)
            root.setCompleted()

    # Partition into expected successes and expected failures, then verify
    expected_successes = [
        drops[oid] for oid in drops if oid not in expected_failures
    ]
    expected_failures = [drops[oid] for oid in drops if oid in expected_failures]
    for drop in expected_successes:
        self.assertEqual(DROPStates.COMPLETED, drop.status)
    for drop in expected_failures:
        self.assertEqual(DROPStates.ERROR, drop.status)

    leaf_drop_data = None
    if leaf_drop not in expected_failures:
        leaf_drop_data = droputils.allDropContents(leaf_drop)
        if leaf_data is not None:
            self.assertEqual(len(leaf_data), len(leaf_drop_data))
            self.assertEqual(leaf_data, leaf_drop_data)

    dm1.destroySession(sessionId)
    dm2.destroySession(sessionId)
    return leaf_drop_data
def test_pos_only_args(self):
    """
    Use a graph with compile function to test positional only arguments
    """
    sessionId = "lalo"
    with pkg_resources.resource_stream(
        "test", "graphs/compilePG.graph"
    ) as f:  # @UndefinedVariable
        graphSpec = json.load(f)
    # dropSpecs = graph_loader.loadDropSpecs(graphSpec)
    self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

    # Deploy now and get OIDs
    self.dim.deploySession(sessionId)
    sd = self.dm._sessions[sessionId].drops["2022-05-06T08:43:26_-2_0"]
    fd = self.dm._sessions[sessionId].drops["2022-05-06T08:43:26_-1_0"]
    with droputils.DROPWaiterCtx(self, fd, 3):
        sd.setCompleted()
    # logger.debug(f'PyfuncAPPDrop signature: {dir(fd)}')
    logger.debug(f'PyfuncAPPDrop status: {fd.status}')
    self.assertEqual(2, fd.status)
def test_ArrayLoop(self):
    """
    Use a graph with compile function to test positional only arguments
    """
    sessionId = "lalo"
    start_drop = InMemoryDROP('a', 'a')
    with pkg_resources.resource_stream(
        "test", "graphs/ArrayLoopPG.graph"
    ) as f:  # @UndefinedVariable
        graphSpec = json.load(f)
    # dropSpecs = graph_loader.loadDropSpecs(graphSpec)
    self.createSessionAndAddGraph(sessionId, graphSpec=graphSpec)

    # Deploy now and get OIDs
    self.dim.deploySession(sessionId)
    sd = self.dm._sessions[sessionId].drops["2022-06-22T09:13:53_-1_0"]
    sd.addInput(start_drop)
    fd = self.dm._sessions[sessionId].drops["2022-06-22T09:13:53_-4_0/0/0"]
    with droputils.DROPWaiterCtx(self, fd, 3):
        start_drop.setCompleted()
    # logger.debug(f'PyfuncAPPDrop signature: {dir(fd)}')
    logger.debug(f'PyfuncAPPDrop status: {fd.status}')
    self.assertEqual(2, fd.status)
def test_runWithFourDMs(self):
    """
    A test that creates several DROPs in four different DMs and runs the
    graph.

    DM #1 hosts plain drop A. On DM #2, app B (fed by A) copies into C, D
    and E, which app F collapses. On DM #3, app G (also fed by A) copies
    into H, I and J, which app K collapses. On DM #4, F produces L and K
    produces M, and app N copies both into the final output O.

    B, F, G, K and N are AppDOs; the rest are plain in-memory DROPs
    """
    dm1, dm2, dm3, dm4 = [self._start_dm() for _ in range(4)]

    sessionId = "s1"
    g1 = [memory("A", expectedSize=1)]
    g2 = [
        sleepAndCopy("B", outputs=["C", "D", "E"], sleepTime=0),
        memory("C"),
        memory("D"),
        memory("E"),
        sleepAndCopy("F", inputs=["C", "D", "E"], sleepTime=0),
    ]
    g3 = [
        sleepAndCopy("G", outputs=["H", "I", "J"], sleepTime=0),
        memory("H"),
        memory("I"),
        memory("J"),
        sleepAndCopy("K", inputs=["H", "I", "J"], sleepTime=0),
    ]
    g4 = [
        memory("L"),
        memory("M"),
        sleepAndCopy("N", inputs=["L", "M"], outputs=["O"], sleepTime=0),
        memory("O"),
    ]

    # Inter-DM relationships, one entry per pair of connected DMs
    rels_12 = [DROPRel("A", DROPLinkType.INPUT, "B")]
    rels_13 = [DROPRel("A", DROPLinkType.INPUT, "G")]
    rels_24 = [DROPRel("F", DROPLinkType.PRODUCER, "L")]
    rels_34 = [DROPRel("K", DROPLinkType.PRODUCER, "M")]
    quickDeploy(dm1, sessionId, g1, {
        nm_conninfo(1): rels_12,
        nm_conninfo(2): rels_13
    })
    quickDeploy(dm2, sessionId, g2, {
        nm_conninfo(0): rels_12,
        nm_conninfo(3): rels_24
    })
    quickDeploy(dm3, sessionId, g3, {
        nm_conninfo(0): rels_13,
        nm_conninfo(3): rels_34
    })
    quickDeploy(dm4, sessionId, g4, {
        nm_conninfo(1): rels_24,
        nm_conninfo(2): rels_34
    })

    self.assertEqual(1, len(dm1._sessions[sessionId].drops))
    self.assertEqual(5, len(dm2._sessions[sessionId].drops))
    self.assertEqual(5, len(dm3._sessions[sessionId].drops))
    self.assertEqual(4, len(dm4._sessions[sessionId].drops))

    a = dm1._sessions[sessionId].drops["A"]
    o = dm4._sessions[sessionId].drops["O"]
    drops = []
    for x in (dm1, dm2, dm3, dm4):
        drops += x._sessions[sessionId].drops.values()

    # Run! This should trigger the full execution of the graph
    # (A has expectedSize=1, so this single byte completes it)
    with droputils.DROPWaiterCtx(self, o, 5):
        a.write(b"a")

    for drop in drops:
        self.assertEqual(
            DROPStates.COMPLETED,
            drop.status,
            "Status of '%s' is not COMPLETED: %d" % (drop.uid, drop.status),
        )

    for dm in [dm1, dm2, dm3, dm4]:
        dm.destroySession(sessionId)
def test_runWithFourDMs(self):
    """
    A test that creates several DROPs in four different DMs and runs the
    graph.

    DM #1 hosts plain drop A. On DM #2, app B (fed by A) copies into C, D
    and E, which app F collapses. On DM #3, app G (also fed by A) copies
    into H, I and J, which app K collapses. On DM #4, F produces L and K
    produces M, and app N copies both into the final output O.

    B, F, G, K and N are AppDOs; the rest are plain in-memory DROPs
    """
    dm1, dm2, dm3, dm4 = [self._start_dm() for _ in range(4)]

    sessionId = 's1'
    g1 = [memory('A', expectedSize=1)]
    g2 = [sleepAndCopy('B', outputs=['C', 'D', 'E'], sleepTime=0),
          memory('C'),
          memory('D'),
          memory('E'),
          sleepAndCopy('F', inputs=['C', 'D', 'E'], sleepTime=0)]
    g3 = [sleepAndCopy('G', outputs=['H', 'I', 'J'], sleepTime=0),
          memory('H'),
          memory('I'),
          memory('J'),
          sleepAndCopy('K', inputs=['H', 'I', 'J'], sleepTime=0)]
    g4 = [memory('L'),
          memory('M'),
          sleepAndCopy('N', inputs=['L', 'M'], outputs=['O'], sleepTime=0),
          memory('O')]

    # Inter-DM relationships, one entry per pair of connected DMs
    rels_12 = [DROPRel('A', DROPLinkType.INPUT, 'B')]
    rels_13 = [DROPRel('A', DROPLinkType.INPUT, 'G')]
    rels_24 = [DROPRel('F', DROPLinkType.PRODUCER, 'L')]
    rels_34 = [DROPRel('K', DROPLinkType.PRODUCER, 'M')]
    quickDeploy(dm1, sessionId, g1,
                {nm_conninfo(1): rels_12, nm_conninfo(2): rels_13})
    quickDeploy(dm2, sessionId, g2,
                {nm_conninfo(0): rels_12, nm_conninfo(3): rels_24})
    quickDeploy(dm3, sessionId, g3,
                {nm_conninfo(0): rels_13, nm_conninfo(3): rels_34})
    quickDeploy(dm4, sessionId, g4,
                {nm_conninfo(1): rels_24, nm_conninfo(2): rels_34})

    self.assertEqual(1, len(dm1._sessions[sessionId].drops))
    self.assertEqual(5, len(dm2._sessions[sessionId].drops))
    self.assertEqual(5, len(dm3._sessions[sessionId].drops))
    self.assertEqual(4, len(dm4._sessions[sessionId].drops))

    a = dm1._sessions[sessionId].drops['A']
    o = dm4._sessions[sessionId].drops['O']
    drops = []
    for x in (dm1, dm2, dm3, dm4):
        drops += x._sessions[sessionId].drops.values()

    # Run! This should trigger the full execution of the graph
    # (A has expectedSize=1, so this single byte completes it)
    with droputils.DROPWaiterCtx(self, o, 5):
        a.write(b'a')

    for drop in drops:
        self.assertEqual(DROPStates.COMPLETED, drop.status,
                         "Status of '%s' is not COMPLETED: %d" %
                         (drop.uid, drop.status))

    for dm in [dm1, dm2, dm3, dm4]:
        dm.destroySession(sessionId)