def test_enter_exit(self):
    """Verify __enter__/__exit__ are invoked once per reset on SPL Python operators.

    On each operator, __enter__ and __exit__ should be called once for each
    reset.  __enter__ is also called at startup and __exit__ at shutdown.
    The final __exit__ call is hard to observe (it is driven by Python
    teardown rather than our code), so the test is valid when the number of
    __enter__ calls is one more than the number of resets and the number of
    __exit__ calls equals the number of resets.  The tuples on the two
    streams report the counts; we look for ('source', 6, 5) and
    ('transit', 6, 5).
    """
    topo = Topology()
    streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
    source = op.Source(
        topo,
        'com.ibm.streamsx.topology.pytest.checkpoint::EnterExitSource',
        schema.StreamSchema('tuple<rstring from, int32 enter, int32 exit>').as_tuple(),
        params={'period': 0.1})
    source.stream.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    transit = op.Map(
        'com.ibm.streamsx.topology.pytest.checkpoint::EnterExitMap',
        source.stream,
        schema.StreamSchema('tuple<rstring from, int32 enter, int32 exit>').as_tuple())

    tester = Tester(topo)
    tester.resets(10)

    # BUG FIX: the original lambdas referenced the undefined name 'Fale'
    # (a typo for 'False') which would raise NameError when the failure
    # branch was evaluated.  Logic: fail as soon as enter != exit + 1;
    # pass once enter >= 6 (with enter == exit + 1); otherwise keep waiting.
    def _enter_exit_ok(tuple_):
        if tuple_[1] != tuple_[2] + 1:
            return False
        return True if tuple_[1] >= 6 else None

    tester.eventual_result(source.stream, _enter_exit_ok)
    tester.eventual_result(transit.stream, _enter_exit_ok)

    job_config = streamsx.topology.context.JobConfig(tracing='debug')
    job_config.add(self.test_config)

    tester.test(self.test_ctxtype, self.test_config)
def test_primitive_foreach(self):
    """FizzBuzz via an SPL primitive Map operator inside a periodic consistent region."""
    iterations = 3000
    topo = Topology()
    # Checkpointing is configured alongside the consistent region.
    topo.checkpoint_period = timedelta(seconds=1)
    streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
    counter = op.Source(
        topo,
        "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter",
        schema.StreamSchema('tuple<int32 f>').as_tuple(),
        params={'iterations': iterations, 'period': 0.01})
    counter.stream.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    fizzbuzz = op.Map(
        "com.ibm.streamsx.topology.pytest.checkpoint::FizzBuzzPrimitive",
        counter.stream,
        schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
    # Verify is a sink operator that checks the fizzbuzz output in-flight.
    op.Sink("com.ibm.streamsx.topology.pytest.checkpoint::Verify", fizzbuzz.stream)
    checked = fizzbuzz.stream
    tester = Tester(topo)
    tester.resets()
    tester.tuple_count(checked, iterations)
    tester.test(self.test_ctxtype, self.test_config)
def test_source(self):
    """SPL TimeCounter source replays its full sequence through consistent-region resets."""
    n = 3000
    topo = Topology()
    streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
    counter = op.Source(
        topo,
        "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter",
        schema.StreamSchema('tuple<int32 f>').as_tuple(),
        params={'iterations': n, 'period': 0.01})
    stream = counter.stream
    stream.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    tester = Tester(topo)
    tester.resets()
    tester.tuple_count(stream, n)
    # Tuples are single-attribute; zip wraps each int in a 1-tuple.
    tester.contents(stream, list(zip(range(n))))
    tester.test(self.test_ctxtype, self.test_config)
def test_flat_map(self):
    """Stateful flat_map splitter; the full word stream must survive resets."""
    count = 1000
    reset_count = 5
    topo = Topology()
    phrases = ["All work", "and no play", "makes Jack", "a dull boy"]
    lines = topo.source(ListIterator(phrases, period=0.01, count=count))
    if self.is_cr():
        lines.set_consistent(ConsistentRegionConfig.periodic(
            5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    words = lines.flat_map(StatefulSplit())
    tester = Tester(topo)
    if self.is_cr():
        tester.resets(reset_count)
    # Each phrase splits into words; the whole word cycle repeats `count` times.
    word_cycle = ["All", "work", "and", "no", "play",
                  "makes", "Jack", "a", "dull", "boy"]
    expected = word_cycle * count
    tester.contents(words, expected)
    tester.test(self.test_ctxtype, self.test_config)
def test_beacon(self):
    """Operator-driven consistent region with spl.utility::Beacon as the source.

    Beacon supports operator-driven regions, triggering a drain every
    `triggerCount` tuples.
    """
    iterations = 5000
    topo = Topology()
    beacon = op.Source(
        topo, "spl.utility::Beacon",
        schema.StreamSchema('tuple<int32 f>').as_tuple(),
        params={'iterations': iterations,
                'period': 0.01,
                'triggerCount': streamsx.spl.types.uint32(500)})
    beacon.f = beacon.output('(int32)IterationCount()')
    out = beacon.stream
    out.set_consistent(ConsistentRegionConfig.operator_driven(
        drain_timeout=40, reset_timeout=40, max_consecutive_attempts=4))
    tester = Tester(topo)
    # For operator-driven regions the resetter fires at a random 10-40s
    # interval, so only one reset is likely to complete during this run.
    tester.resets(1)
    tester.tuple_count(out, iterations)
    tester.contents(out, list(zip(range(iterations))))
    tester.test(self.test_ctxtype, self.test_config)
def test_aggregate(self):
    """Stateful windowed aggregation with count and time triggers.

    Builds a filter -> map -> windowed-aggregate pipeline, then computes the
    expected window results locally by mimicking the pipeline with Python
    builtins.  Runs inside a periodic consistent region when the test
    context supports it (self.is_cr()).
    """
    iterations = 3000
    reset_count = 5
    topo = Topology()
    # Generate integers from [0,3000)
    s = topo.source(TimeCounter(iterations=iterations, period=0.01))
    if self.is_cr():
        s.set_consistent(
            ConsistentRegionConfig.periodic(5,
                                            drain_timeout=40,
                                            reset_timeout=40,
                                            max_consecutive_attempts=6))
    # Filter the odd ones
    s = s.filter(StatefulEvenFilter())
    # Halve the even ones and add one. Now have integers [1,(iterations/2))
    s = s.map(StatefulHalfPlusOne())
    # sc: count-triggered average (last 10, every 3 tuples);
    # st: time-triggered average (last 17, every 2 seconds).
    sc = s.last(10).trigger(3).aggregate(StatefulAverage())
    st = s.last(17).trigger(timedelta(seconds=2)).aggregate(
        StatefulAverage())
    # non-stateful aggregation functions
    nsc = s.last(19).trigger(13).aggregate(lambda tuples: sum(tuples))
    tester = Tester(topo)
    if self.is_cr():
        tester.resets(reset_count)
    # Find the expected results.
    # mimic the processing using Python builtins
    iv = filter(StatefulEvenFilter(), range(iterations))
    iv = list(map(StatefulHalfPlusOne(), iv))
    # Expected stateful averages sc,st
    sagg = StatefulAverage()
    ers = [
        sagg(iv[0:i + 3][-10:]) for i in range(0, 3 * int(len(iv) / 3), 3)
    ]
    tester.contents(sc, ers)
    tester.tuple_check(st, TimedStatefulAverageChecker())
    # Must eventually aggregate on the last 17 items in iv
    # but only if cr otherwise the final marker stops
    # the window before the final trigger
    if self.is_cr():
        tester.eventual_result(
            st, lambda av: True if av[1] == sagg(iv[-17:])[1] else None)
    # Expected non-stateful averages nsc
    ernsc = [
        sum(iv[0:i + 13][-19:])
        for i in range(0, 13 * int(len(iv) / 13), 13)
    ]
    tester.contents(nsc, ernsc)
    tester.test(self.test_ctxtype, self.test_config)
def _test_insert_consistent_region(self):
    """Insert Beacon-generated tuples into the event store from inside a
    periodic consistent region, requiring at least `num_resets` resets.

    Leading underscore keeps this out of automatic test discovery.
    """
    print('\n---------' + str(self))
    name = 'test_insert_consistent_region'
    topo = Topology(name)
    self._add_toolkits(topo, None)
    # configuration of consistent region trigger period
    trigger_period = 10
    num_expected_tuples = 8000
    num_resets = 2
    run_for = 120  # in seconds
    beacon = op.Source(topo,
                       "spl.utility::Beacon",
                       'tuple<int64 id, rstring val>',
                       params={
                           'period': 0.01,
                           'iterations': num_expected_tuples
                       })
    beacon.id = beacon.output('(int64)IterationCount()')
    beacon.val = beacon.output(spltypes.rstring('CR_TEST'))
    beacon.stream.set_consistent(
        ConsistentRegionConfig.periodic(trigger_period))
    # Insert into the event store; 'id' is both primary and partitioning key.
    es.insert(beacon.stream,
              connection=self.connection,
              database=self.database,
              table='StreamsCRTable',
              primary_key='id',
              partitioning_key='id',
              front_end_connection_flag=False,
              user=self.es_user,
              password=self.es_password,
              truststore=self.es_truststore,
              truststore_password=self.es_truststore_password,
              keystore=self.es_keystore,
              keystore_password=self.es_keystore_password)
    #self._build_only(name, topo)
    tester = Tester(topo)
    tester.run_for(run_for)
    tester.resets(num_resets)  # minimum number of resets for each region
    cfg = {}
    # change trace level
    job_config = streamsx.topology.context.JobConfig(tracing='warn')
    job_config.add(cfg)
    cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
    tester.test(self.test_ctxtype, cfg, always_collect_logs=True)
    print(str(tester.result))
def test_enter_exit(self):
    """__enter__/__exit__ must fire once per reset (plus once at startup)."""
    iterations = 3000
    reset_count = 5
    topo = Topology()
    src = topo.source(TimeCounter(iterations=iterations, period=0.01))
    src.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    # One extra __enter__ is expected for the initial startup.
    verifier = VerifyEnterExit(reset_count + 1, "VerifyEnterExit")
    tester = Tester(topo)
    tester.contents(src, range(iterations))
    tester.resets(reset_count)
    tester.add_condition(src, verifier)
    tester.test(self.test_ctxtype, self.test_config)
def test_for_each(self):
    """for_each sink verifying numeric order, optionally in a consistent region."""
    iterations = 3000
    reset_count = 5
    topo = Topology()
    numbers = topo.source(TimeCounter(iterations=iterations, period=0.01))
    if self.is_cr():
        numbers.set_consistent(ConsistentRegionConfig.periodic(
            5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    numbers.for_each(VerifyNumericOrder())
    tester = Tester(topo)
    tester.contents(numbers, range(iterations))
    if self.is_cr():
        tester.resets(reset_count)
    tester.test(self.test_ctxtype, self.test_config)
def test_aggregate(self):
    """Stateful windowed aggregation with count and time triggers.

    Same pipeline as the expanded variant elsewhere in this repository:
    filter -> map -> windowed aggregates, with expected results computed
    locally via Python builtins.  Consistent region applied when
    self.is_cr() is true.
    """
    iterations = 3000
    reset_count = 5
    topo = Topology()
    # Generate integers from [0,3000)
    s = topo.source(TimeCounter(iterations=iterations, period=0.01))
    if self.is_cr():
        s.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    # Filter the odd ones
    s = s.filter(StatefulEvenFilter())
    # Halve the even ones and add one. Now have integers [1,(iterations/2))
    s = s.map(StatefulHalfPlusOne())
    # sc: count-triggered (last 10, every 3); st: time-triggered (last 17, 2s).
    sc = s.last(10).trigger(3).aggregate(StatefulAverage())
    st = s.last(17).trigger(datetime.timedelta(seconds=2)).aggregate(StatefulAverage())
    # non-stateful aggregation functions
    nsc = s.last(19).trigger(13).aggregate(lambda tuples : sum(tuples))
    tester = Tester(topo)
    if self.is_cr():
        tester.resets(reset_count)
    # Find the expected results.
    # mimic the processing using Python builtins
    iv = filter(StatefulEvenFilter(), range(iterations))
    iv = list(map(StatefulHalfPlusOne(), iv))
    # Expected stateful averages sc,st
    sagg = StatefulAverage()
    ers = [ sagg(iv[0:i+3][-10:]) for i in range(0, 3*int(len(iv)/3), 3) ]
    tester.contents(sc, ers)
    tester.tuple_check(st, TimedStatefulAverageChecker())
    # Must eventually aggregate on the last 17 items in iv
    # but only if cr otherwise the final marker stops
    # the window before the final trigger
    if self.is_cr():
        tester.eventual_result(st, lambda av : True if av[1] == sagg(iv[-17:])[1] else None)
    # Expected non-stateful averages nsc
    ernsc = [ sum(iv[0:i+13][-19:]) for i in range(0, 13*int(len(iv)/13), 13) ]
    tester.contents(nsc, ernsc)
    tester.test(self.test_ctxtype, self.test_config)
def test_filter_map(self):
    """Stateful SPL filter + map chain under a periodic consistent region."""
    iterations = 3000
    topo = Topology()
    streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
    counter = op.Source(
        topo,
        "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter",
        schema.StreamSchema('tuple<int32 f>').as_tuple(),
        params={'iterations': iterations, 'period': 0.01})
    counter.stream.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    # Drop the odd values, then halve-and-increment the survivors.
    evens = op.Map(
        "com.ibm.streamsx.topology.pytest.checkpoint::StatefulEvenFilter",
        counter.stream, None, params={})
    half_plus_one = op.Map(
        "com.ibm.streamsx.topology.pytest.checkpoint::StatefulHalfPlusOne",
        evens.stream, None, params={})
    result = half_plus_one.stream
    tester = Tester(topo)
    tester.resets()
    tester.tuple_count(result, iterations / 2)
    tester.contents(result, list(zip(range(1, int((iterations / 2) + 1)))))
    tester.test(self.test_ctxtype, self.test_config)
def test_primitive_foreach(self):
    """Primitive-operator FizzBuzz; verifies tuple count across resets."""
    iterations = 3000
    topo = Topology()
    topo.checkpoint_period = timedelta(seconds=1)
    streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
    ns = "com.ibm.streamsx.topology.pytest.checkpoint"
    time_counter = op.Source(
        topo, ns + "::TimeCounter",
        schema.StreamSchema('tuple<int32 f>').as_tuple(),
        params={'iterations': iterations, 'period': 0.01})
    time_counter.stream.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    fb = op.Map(ns + "::FizzBuzzPrimitive", time_counter.stream,
                schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
    # Downstream sink checks the fizzbuzz annotations as they flow.
    op.Sink(ns + "::Verify", fb.stream)
    tester = Tester(topo)
    tester.resets()
    tester.tuple_count(fb.stream, iterations)
    tester.test(self.test_ctxtype, self.test_config)
def test_aggregate_partitioned_stateful(self):
    """Windowed aggregation with a *stateful* partition callable.

    GreatestSoFar assigns each tuple to partition 0 or 1; the same stateful
    logic is replayed locally (generator q) to derive the expected window
    maxima.
    """
    iterations = 3000
    reset_count = 5
    topo = Topology()
    # Generate integers from [0,3000)
    s = topo.source(TimeCounter(iterations=iterations, period=0.01))
    if (self.is_cr()):
        s.set_consistent(
            ConsistentRegionConfig.periodic(5,
                                            drain_timeout=40,
                                            reset_timeout=40,
                                            max_consecutive_attempts=6))
    # Find the collatz (hotpo) sequence length
    s = s.map(lambda x: CollatzLength(x + 1))
    count = 10
    trigger = 3
    sc = s.last(count).trigger(trigger).partition(
        GreatestSoFar()).aggregate(max)
    #sc.print()
    #streamsx.topology.context.submit('TOOLKIT', topo)
    tester = Tester(topo)
    if (self.is_cr()):
        tester.resets(reset_count)
    # Find the expected results.
    # mimic the processing using Python builtins
    iv = list(map(lambda x: CollatzLength(x + 1), range(iterations)))

    def q(l):
        # Replay the stateful partitioning: each partition triggers
        # independently every `trigger` tuples over its last `count` items.
        partitions = [[], []]
        gsf = GreatestSoFar()
        for i in l:
            partition = gsf(i)
            partitions[partition].append(i)
            if len(partitions[partition]) % trigger == 0:
                yield max(partitions[partition][-count:])

    tester.contents(sc, q(iv))
    tester.test(self.test_ctxtype, self.test_config)
def test_source(self):
    """SPL source replay check: every tuple must arrive despite resets."""
    iterations = 3000
    topo = Topology()
    streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
    ns = "com.ibm.streamsx.topology.pytest.checkpoint"
    source_op = op.Source(
        topo, ns + "::TimeCounter",
        schema.StreamSchema('tuple<int32 f>').as_tuple(),
        params={'iterations': iterations, 'period': 0.01})
    out = source_op.stream
    out.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    tester = Tester(topo)
    tester.resets()
    tester.tuple_count(out, iterations)
    tester.contents(out, list(zip(range(iterations))))
    tester.test(self.test_ctxtype, self.test_config)
def test_hash_adder(self):
    """Stateful hash routing across a parallel region.

    Output order is only guaranteed when the region width is 1.
    """
    iterations = 3000
    reset_count = 5
    topo = Topology()
    nums = topo.source(TimeCounter(iterations=iterations, period=0.01))
    if self.is_cr():
        nums.set_consistent(ConsistentRegionConfig.periodic(
            5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    width = 3
    adjusted = (nums
                .parallel(width, Routing.HASH_PARTITIONED, StatefulStupidHash())
                .map(lambda x: x + 23)
                .end_parallel())
    expected = [v + 23 for v in range(iterations)]
    tester = Tester(topo)
    if self.is_cr():
        tester.resets(reset_count)
    tester.contents(adjusted, expected, ordered=width == 1)
    tester.test(self.test_ctxtype, self.test_config)
def test_source(self):
    """Python source replays the full sequence across consistent-region resets."""
    iterations = 3000
    reset_count = 5
    topo = Topology()
    counted = topo.source(TimeCounter(iterations=iterations, period=0.01))
    counted.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    tester = Tester(topo)
    tester.contents(counted, range(iterations))
    tester.resets(reset_count)
    tester.test(self.test_ctxtype, self.test_config)
def test_beacon(self):
    """Beacon drives an operator-driven consistent region via triggerCount."""
    iterations = 5000
    topo = Topology()
    beacon_params = {'iterations': iterations,
                     'period': 0.01,
                     'triggerCount': streamsx.spl.types.uint32(500)}
    beacon = op.Source(topo, "spl.utility::Beacon",
                       schema.StreamSchema('tuple<int32 f>').as_tuple(),
                       params=beacon_params)
    beacon.f = beacon.output('(int32)IterationCount()')
    stream = beacon.stream
    stream.set_consistent(ConsistentRegionConfig.operator_driven(
        drain_timeout=40, reset_timeout=40, max_consecutive_attempts=4))
    tester = Tester(topo)
    # The resetter uses a random 10-40s interval for operator-driven
    # regions; only one reset is likely to complete during this run.
    tester.resets(1)
    tester.tuple_count(stream, iterations)
    tester.contents(stream, list(zip(range(iterations))))
    tester.test(self.test_ctxtype, self.test_config)
def test_map_foreach(self):
    """SPL FizzBuzzMap output is checked against a locally built model."""
    iterations = 3000
    topo = Topology()
    streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
    counter = op.Source(
        topo,
        "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter",
        schema.StreamSchema('tuple<int32 f>').as_tuple(),
        params={'iterations': iterations, 'period': 0.01})
    counter.stream.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    annotated = op.Map(
        "com.ibm.streamsx.topology.pytest.checkpoint::FizzBuzzMap",
        counter.stream,
        schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
    op.Sink("com.ibm.streamsx.topology.pytest.checkpoint::Verify",
            annotated.stream)
    out = annotated.stream
    tester = Tester(topo)
    tester.resets()
    tester.tuple_count(out, iterations)
    # Build the expected fizz/buzz annotations locally ('fizz' precedes 'buzz').
    expected = []
    for i in range(iterations):
        label = ''
        if i % 3 == 0:
            label += 'fizz'
        if i % 5 == 0:
            label += 'buzz'
        expected.append((i, label))
    tester.contents(out, expected)
    tester.test(self.test_ctxtype, self.test_config)
def test_map_foreach(self):
    """FizzBuzzMap annotations compared against a comprehension-built model."""
    iterations = 3000
    topo = Topology()
    streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
    time_counter = op.Source(
        topo,
        "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter",
        schema.StreamSchema('tuple<int32 f>').as_tuple(),
        params={'iterations': iterations, 'period': 0.01})
    time_counter.stream.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    fizzbuzz = op.Map(
        "com.ibm.streamsx.topology.pytest.checkpoint::FizzBuzzMap",
        time_counter.stream,
        schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
    op.Sink("com.ibm.streamsx.topology.pytest.checkpoint::Verify",
            fizzbuzz.stream)
    result = fizzbuzz.stream
    tester = Tester(topo)
    tester.resets()
    tester.tuple_count(result, iterations)
    # Expected results: 'fizz' for multiples of 3, then 'buzz' for multiples of 5.
    expected = [(i, ('fizz' if i % 3 == 0 else '') +
                    ('buzz' if i % 5 == 0 else ''))
                for i in range(iterations)]
    tester.contents(result, expected)
    tester.test(self.test_ctxtype, self.test_config)
def test_aggregate_partitioned(self):
    """Windowed stateful average with a stateless even/odd partition key.

    Expected results are derived locally by replaying the pipeline with
    Python builtins and slicing per-partition.
    """
    iterations = 3000
    reset_count = 5
    topo = Topology()
    # Generate integers from [0,3000)
    s = topo.source(TimeCounter(iterations=iterations, period=0.01))
    if (self.is_cr()):
        s.set_consistent(
            ConsistentRegionConfig.periodic(5,
                                            drain_timeout=40,
                                            reset_timeout=40,
                                            max_consecutive_attempts=6))
    # Filter the odd ones
    s = s.filter(StatefulEvenFilter())
    # Halve the even ones and add one. Now have integers [1,(iterations/2))
    s = s.map(StatefulHalfPlusOne())
    # Partition by parity; each partition triggers every 3 of its own tuples.
    sc = s.last(10).trigger(3).partition(lambda x: x % 2).aggregate(
        StatefulAverage())
    tester = Tester(topo)
    if (self.is_cr()):
        tester.resets(reset_count)
    # Find the expected results.
    # mimic the processing using Python builtins
    iv = filter(StatefulEvenFilter(), range(iterations))
    iv = list(map(StatefulHalfPlusOne(), iv))
    # Expected stateful averages sc,st
    sagg = StatefulAverage()
    # Stride-2 slices select the partition a given trigger belongs to.
    ers = [
        sagg(iv[i % 2:i + 5 - i % 2:2][-10:])
        for i in range(0, 3 * int(len(iv) / 3), 3)
    ]
    tester.contents(sc, ers)
    tester.test(self.test_ctxtype, self.test_config)
def test_flat_map(self):
    """flat_map with a stateful splitter; word stream must survive resets."""
    count = 1000
    reset_count = 5
    topo = Topology()
    source_lines = topo.source(ListIterator(
        ["All work", "and no play", "makes Jack", "a dull boy"],
        period=0.01, count=count))
    if self.is_cr():
        source_lines.set_consistent(ConsistentRegionConfig.periodic(
            5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    words = source_lines.flat_map(StatefulSplit())
    tester = Tester(topo)
    if self.is_cr():
        tester.resets(reset_count)
    # The split word sequence, repeated once per source iteration.
    expected = ["All", "work", "and", "no", "play",
                "makes", "Jack", "a", "dull", "boy"] * count
    tester.contents(words, expected)
    tester.test(self.test_ctxtype, self.test_config)
def test_aggregate(self):
    """Windowed stateful average over filtered, halved integers with resets.

    If the number of iterations is changed, keep it a multiple of six, or
    rework the expected-results generation below.
    """
    iterations = 3000
    reset_count = 5
    topo = Topology()
    # Integers [0, iterations)
    nums = topo.source(TimeCounter(iterations=iterations, period=0.01))
    nums.set_consistent(ConsistentRegionConfig.periodic(
        5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
    # Keep the even values, then halve and add one: integers [1, iterations/2]
    pipeline = nums.filter(StatefulEvenFilter()).map(StatefulHalfPlusOne())
    averaged = pipeline.last(10).trigger(3).aggregate(StatefulAverage())
    tester = Tester(topo)
    tester.resets(reset_count)
    # The first three averages (before the window fills) are special.
    expected = [2.0, 3.5, 5.0]
    # Afterwards averages start at 7.5 and increase by 3 per trigger.
    # With iterations a multiple of six, the final trigger fires on the last
    # tuple: ten values from (iterations/2 - 9) to (iterations/2), averaging
    # iterations/2 - 4.5 (1495.5 for 3000 iterations).
    end = float(iterations) / 2.0 - 4.5
    expected.extend(
        itertools.takewhile(lambda x: x <= end, itertools.count(7.5, 3)))
    tester.contents(averaged, expected)
    tester.test(self.test_ctxtype, self.test_config)
class Test(unittest.TestCase):
    """ Test invocations of composite operators in local Streams instance """

    @classmethod
    def setUpClass(self):
        """Create the Elasticsearch client from the ES_URL environment variable."""
        print(str(self))
        print("Setup Elasticsearch client ...")
        # ES client expects ES_URL environment variable with URL to Compose
        # Elasticsearch service, e.g. https://user:[email protected]:port/
        es_url = os.environ['ES_URL']
        print(str(es_url))
        self._es = Elasticsearch([es_url], verify_certs=False)
        self._indexName = 'test-index-cloud'
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        # Capture credentials and node list for the toolkit app configuration.
        creds = urlparse(es_url)
        self._es_user_name = creds.username
        self._es_password = creds.password
        self._es_node_list = creds.hostname + ':' + str(creds.port)

    def setUp(self):
        Tester.setup_distributed(self)
        self.elasticsearch_toolkit_location = "../../com.ibm.streamsx.elasticsearch"

    def tearDown(self):
        # Ignore 404 (index missing) and 400 on cleanup.
        self._es.indices.delete(index=self._indexName, ignore=[400, 404])

    def _add_toolkits(self, topo, test_toolkit):
        """Add the test toolkit and the elasticsearch toolkit to `topo`."""
        tk.add_toolkit(topo, test_toolkit)
        if self.elasticsearch_toolkit_location is not None:
            tk.add_toolkit(topo, self.elasticsearch_toolkit_location)

    def _build_launch_app(self, name, composite_name, parameters,
                          num_result_tuples, test_toolkit, exact=False,
                          run_for=60, resets=0):
        """Build and run composite `composite_name` as a tester job.

        :param name: job/test name used for logging.
        :param composite_name: fully qualified SPL composite to invoke.
        :param parameters: submission parameters for the composite.
        :param num_result_tuples: expected number of result tuples.
        :param test_toolkit: toolkit directory containing the composite.
        :param exact: require exactly `num_result_tuples` (else at-least).
        :param run_for: seconds to run the job.
        :param resets: minimum consistent-region resets to trigger (0 = none).
        """
        print("------ " + name + " ------")
        topo = Topology(name)
        self._add_toolkits(topo, test_toolkit)
        params = parameters
        # Call the test composite
        test_op = op.Source(topo, composite_name, 'tuple<rstring result>',
                            params=params)
        self.tester = Tester(topo)
        self.tester.run_for(run_for)
        if (resets > 0):
            # minimum number of resets for each region,
            # requires v1.11 of topology toolkit
            self.tester.resets(resets)
        # BUG FIX: the `exact` parameter was previously ignored (hard-coded
        # exact=False).  It is now honored; the default was changed to False
        # so all existing call sites keep their prior behavior.
        self.tester.tuple_count(test_op.stream, num_result_tuples, exact=exact)
        cfg = {}
        # change trace level
        job_config = streamsx.topology.context.JobConfig(tracing='info')
        job_config.add(cfg)
        if ("TestCloud" not in str(self)):
            cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
        # Run the test
        test_res = self.tester.test(self.test_ctxtype, cfg,
                                    assert_on_fail=True,
                                    always_collect_logs=True)
        print(str(self.tester.result))
        assert test_res, name + " FAILED (" + self.tester.result[
            "application_logs"] + ")"

    def _validate_count(self, indexName, expectedNum):
        """Assert the index holds exactly `expectedNum` documents."""
        # check the count
        count = self._es.count(index=indexName,
                               doc_type='_doc',
                               body={"query": {
                                   "match_all": {}
                               }})
        print("Count: " + str(count['count']))
        assert (
            count['count'] == expectedNum
        ), "Wrong tuple count (expected=" + str(expectedNum) + "): " + str(
            count['count'])

    def _run_shell_command_line(self, command):
        """Run `command` in a shell; return (stdout, stderr, returncode)."""
        process = Popen(command,
                        universal_newlines=True,
                        shell=True,
                        stdout=PIPE,
                        stderr=PIPE)
        stdout, stderr = process.communicate()
        return stdout, stderr, process.returncode

    def _create_app_config(self):
        """Create the 'es' application configuration when running on-prem."""
        if ("TestICP" in str(self) or "TestCloud" in str(self)):
            print("Ensure that application configuration 'es' is created.")
        else:
            if streams_install_env_var():
                print(
                    "Create elasticsearch application configuration with streamtool"
                )
                this_dir = os.path.dirname(os.path.realpath(__file__))
                app_dir = this_dir + '/es_test'
                # NOTE(review): the user/password expressions below were
                # redacted in the original source; reconstructed from the
                # credentials captured in setUpClass — confirm against
                # version history.
                stdout, stderr, err = self._run_shell_command_line(
                    'export ES_NODES=' + self._es_node_list + ';' +
                    'export ES_USER=' + self._es_user_name + ';' +
                    'export ES_PASSWORD=' + self._es_password + ';' +
                    'cd ' + app_dir + '; make configure')
                print(str(err))

    # ------------------------------------
    # CONSISTENT REGION test with TopologyTester:
    # Resets triggered by ConsistentRegionResetter and Beacon re-submits the tuples
    def test_consistent_region_with_resets(self):
        self._indexName = 'test-index-cr'
        self._create_app_config()
        # delete index before launching Streams job
        self._es.indices.delete(index=self._indexName, ignore=[400, 404])
        numResets = 3
        runFor = 150
        numTuples = 300  # num generated tuples
        drainPeriod = 5.0
        self._build_launch_app(
            "test_consistent_region_with_resets",
            "com.ibm.streamsx.elasticsearch.test::TestConsistentRegionAppConfig",
            {
                'indexName': self._indexName,
                'drainPeriod': drainPeriod,
                'numTuples': numTuples
            }, numTuples, 'es_test', False, runFor, numResets)
        self._validate_count(self._indexName, numTuples)

    # ------------------------------------
    def test_bulk(self):
        self._indexName = 'test-index-bulk'
        self._create_app_config()
        # delete index before launching Streams job
        self._es.indices.delete(index=self._indexName, ignore=[400, 404])
        numTuples = 20000  # num generated tuples
        bulkSize = 1000
        self._build_launch_app(
            "test_bulk", "com.ibm.streamsx.elasticsearch.test::TestBulk", {
                'indexName': self._indexName,
                'numTuples': numTuples,
                'bulkSize': bulkSize
            }, numTuples, 'es_test')
        self._validate_count(self._indexName, numTuples)