def test_op_driven_attempts(self):
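        # max_consecutive_attempts must represent an integral value in the range [1, 0x7FFFFFFF].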

        # negative
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=-1)

        # zero
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=0)

        # one
        config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=1)
        self.assertEqual(config.max_consecutive_attempts, 1)

        # exactly 0x7FFFFFFF
        config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=0x7FFFFFFF)
        self.assertEqual(config.max_consecutive_attempts, 0x7FFFFFFF)

        # greater than 0x7FFFFFFF
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=0x80000000)

        # float literal, exactly integral
        config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=12.0)

        self.assertEqual(config.max_consecutive_attempts, 12.0)

        # not exactly integral
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=12.2)

        # a string that can be cast to a valid integral value
        config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts="14")

        self.assertEqual(config.max_consecutive_attempts, "14")
    def test_periodic_period(self):
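        # The period must be positive; it may be a float, a timedelta, or a value castable to float.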

        # No period
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig(ConsistentRegionConfig.Trigger.PERIODIC)

        # zero (float)
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.periodic(0.0)

        # negative (float)
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.periodic(-1.0)

        # timedelta
        config = ConsistentRegionConfig.periodic(timedelta(seconds=41))
        self.assertEqual(config.period, timedelta(seconds=41))

        # zero (timedelta)
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.periodic(timedelta(seconds=0))

        # negative (timedelta)
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.periodic(timedelta(seconds=-8))

        # can cast to float
        config = ConsistentRegionConfig.periodic("4")
        self.assertEqual(config.period, "4")

        # cannot cast to float
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.periodic("table")
    def test_beacon(self):
        # An operator-driven consistent region can be used with a source
        # that supports it, such as Beacon
        iterations = 5000
        topo = Topology()

        beacon = op.Source(topo,
                           "spl.utility::Beacon",
                           schema.StreamSchema('tuple<int32 f>').as_tuple(),
                           params={
                               'iterations': iterations,
                               'period': 0.01,
                               'triggerCount': streamsx.spl.types.uint32(500)
                           })
        beacon.f = beacon.output('(int32)IterationCount()')

        s = beacon.stream
        s.set_consistent(
            ConsistentRegionConfig.operator_driven(drain_timeout=40,
                                                   reset_timeout=40,
                                                   max_consecutive_attempts=4))

        tester = Tester(topo)
        # For operator-driven regions, the resetter uses a random interval
        # from 10-40 seconds for resets.  Only one is likely to be completed
        # while processing tuples for this test.
        tester.resets(1)
        tester.tuple_count(s, iterations)
        tester.contents(s, list(zip(range(0, iterations))))

        tester.test(self.test_ctxtype, self.test_config)
    def test_source(self):
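        # Unlike Beacon (see test_beacon), this source does not support
        # triggering an operator-driven consistent region, so the tester
        # run below is expected to fail.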

        topo = Topology()

        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        bop = op.Source(
            topo,
            "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter",
            schema.StreamSchema('tuple<int32 f>').as_tuple(),
            params={
                'iterations': 30,
                'period': 0.1
            })

        s = bop.stream
        s.set_consistent(
            ConsistentRegionConfig.operator_driven(drain_timeout=40,
                                                   reset_timeout=40,
                                                   max_consecutive_attempts=3))

        tester = Tester(topo)

        self.assertFalse(
            tester.test(self.test_ctxtype,
                        self.test_config,
                        assert_on_fail=False))
    def test_primitive_foreach(self):
        iterations = 3000
        topo = Topology()

        topo.checkpoint_period = timedelta(seconds=1)
        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        timeCounter = op.Source(
            topo,
            "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter",
            schema.StreamSchema('tuple<int32 f>').as_tuple(),
            params={
                'iterations': iterations,
                'period': 0.01
            })
        timeCounter.stream.set_consistent(
            ConsistentRegionConfig.periodic(5,
                                            drain_timeout=40,
                                            reset_timeout=40,
                                            max_consecutive_attempts=6))

        fizzbuzz = op.Map(
            "com.ibm.streamsx.topology.pytest.checkpoint::FizzBuzzPrimitive",
            timeCounter.stream,
            schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
        verify = op.Sink("com.ibm.streamsx.topology.pytest.checkpoint::Verify",
                         fizzbuzz.stream)
        s = fizzbuzz.stream
        tester = Tester(topo)
        tester.resets()
        tester.tuple_count(s, iterations)

        tester.test(self.test_ctxtype, self.test_config)
    def test_op_driven(self):
        config = ConsistentRegionConfig.operator_driven()
        self.assertEqual(config.trigger, ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN)
        # verify the correct defaults have been applied
        self.assertEqual(config.drain_timeout, self._DEFAULT_DRAIN_TIMEOUT)
        self.assertEqual(config.reset_timeout, self._DEFAULT_RESET_TIMEOUT)
        self.assertEqual(config.max_consecutive_attempts, self._DEFAULT_ATTEMPTS)
    def test_enter_exit(self):
        topo = Topology()
        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        source = op.Source(topo, 'com.ibm.streamsx.topology.pytest.checkpoint::EnterExitSource', schema.StreamSchema('tuple<rstring from, int32 enter, int32 exit>').as_tuple(), params={'period':0.1})
        source.stream.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))

        transit = op.Map('com.ibm.streamsx.topology.pytest.checkpoint::EnterExitMap', source.stream, schema.StreamSchema('tuple<rstring from, int32 enter, int32 exit>').as_tuple())

        tester = Tester(topo)
        tester.resets(10)

        # On each operator, __enter__ and __exit__ should be called once for 
        # each reset.  Also __enter__ should be called at startup and __exit__
        # at shutdown.  It is hard to verify the final __exit__ call (and that
        # is handled by python rather than our code), so 
        # the test is valid if the number of __enter__ calls is one more than
        # the number of resets, and the number of __exit__ calls is equal to
        # number of resets.  The tuples on the two streams indicate the number
        # of times __enter__ and __exit__ have been called. 
        # We are looking for two specific tuples:
        # ('source', 6, 5) and ('transit', 6, 5)
        tester.eventual_result(source.stream, lambda tuple_ : True if tuple_[1] >= 6 and tuple_[1] == tuple_[2] + 1 else False if tuple_[1] != tuple_[2] + 1 else None)
        tester.eventual_result(transit.stream, lambda tuple_ : True if tuple_[1] >= 6 and tuple_[1] == tuple_[2] + 1 else False if tuple_[1] != tuple_[2] + 1 else None)

        job_config = streamsx.topology.context.JobConfig(tracing='debug')
        job_config.add(self.test_config)

        tester.test(self.test_ctxtype, self.test_config)
    def test_flat_map(self):
        count = 1000
        reset_count = 5
        topo = Topology()

        lines = topo.source(
            ListIterator(
                ["All work", "and no play", "makes Jack", "a dull boy"],
                period=0.01,
                count=count))

        if self.is_cr():
            lines.set_consistent(
                ConsistentRegionConfig.periodic(5,
                                                drain_timeout=40,
                                                reset_timeout=40,
                                                max_consecutive_attempts=6))

        words = lines.flat_map(StatefulSplit())
        tester = Tester(topo)
        if self.is_cr():
            tester.resets(reset_count)

        # Find the expected results.
        flat_contents = [
            "All", "work", "and", "no", "play", "makes", "Jack", "a", "dull",
            "boy"
        ]
        # repeat the contents 1000 times.
        expected = []
        for i in range(count):
            expected.extend(flat_contents)
        tester.contents(words, expected)
        tester.test(self.test_ctxtype, self.test_config)
    def test_opdriven_source(self):
        topo = Topology()

        s = topo.source(TimeCounter(iterations=30, period=0.1))
        s.set_consistent(ConsistentRegionConfig.operator_driven(drain_timeout=40, reset_timeout=40, max_consecutive_attempts=3))
        tester = Tester(topo)
        self.assertFalse(tester.test(self.test_ctxtype, self.test_config, assert_on_fail=False))
    def test_source(self):
        iterations = 3000
        topo = Topology()

        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        bop = op.Source(
            topo,
            "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter",
            schema.StreamSchema('tuple<int32 f>').as_tuple(),
            params={
                'iterations': iterations,
                'period': 0.01
            })

        s = bop.stream
        s.set_consistent(
            ConsistentRegionConfig.periodic(5,
                                            drain_timeout=40,
                                            reset_timeout=40,
                                            max_consecutive_attempts=6))

        tester = Tester(topo)
        tester.resets()
        tester.tuple_count(s, iterations)
        tester.contents(s, list(zip(range(0, iterations))))

        # job_config = streamsx.topology.context.JobConfig(tracing='debug')
        # job_config.add(self.test_config)

        tester.test(self.test_ctxtype, self.test_config)
    def test_aggregate(self):
        iterations = 3000
        reset_count = 5
        topo = Topology()
        # Generate integers from [0,3000)
        s = topo.source(TimeCounter(iterations=iterations, period=0.01))
        if self.is_cr():
            s.set_consistent(
                ConsistentRegionConfig.periodic(5,
                                                drain_timeout=40,
                                                reset_timeout=40,
                                                max_consecutive_attempts=6))

        # Filter the odd ones
        s = s.filter(StatefulEvenFilter())
        # Halve the even ones and add one.  Now have integers [1,(iterations/2))
        s = s.map(StatefulHalfPlusOne())

        sc = s.last(10).trigger(3).aggregate(StatefulAverage())
        st = s.last(17).trigger(timedelta(seconds=2)).aggregate(
            StatefulAverage())

        # non-stateful aggregation functions
        nsc = s.last(19).trigger(13).aggregate(lambda tuples: sum(tuples))

        tester = Tester(topo)
        if self.is_cr():
            tester.resets(reset_count)

        # Find the expected results.
        # mimic the processing using Python builtins
        iv = filter(StatefulEvenFilter(), range(iterations))
        iv = list(map(StatefulHalfPlusOne(), iv))

        # Expected stateful averages sc,st
        sagg = StatefulAverage()
        ers = [
            sagg(iv[0:i + 3][-10:]) for i in range(0, 3 * int(len(iv) / 3), 3)
        ]
        tester.contents(sc, ers)

        tester.tuple_check(st, TimedStatefulAverageChecker())

        # Must eventually aggregate on the last 17 items in iv
        # but only if cr otherwise the final marker stops
        # the window before the final trigger
        if self.is_cr():
            tester.eventual_result(
                st, lambda av: True if av[1] == sagg(iv[-17:])[1] else None)

        # Expected non-stateful averages nsc
        ernsc = [
            sum(iv[0:i + 13][-19:])
            for i in range(0, 13 * int(len(iv) / 13), 13)
        ]
        tester.contents(nsc, ernsc)

        tester.test(self.test_ctxtype, self.test_config)
    def test_periodic(self):
        config = ConsistentRegionConfig.periodic(14.0)
        self.assertEqual(config.trigger, ConsistentRegionConfig.Trigger.PERIODIC)
        self.assertEqual(config.period, 14.0)

        # verify the correct defaults have been applied
        self.assertEqual(config.drain_timeout, self._DEFAULT_DRAIN_TIMEOUT)
        self.assertEqual(config.reset_timeout, self._DEFAULT_RESET_TIMEOUT)
        self.assertEqual(config.max_consecutive_attempts, self._DEFAULT_ATTEMPTS)
    def test_opdriven_flat_map(self):
        topo = Topology()

        lines = topo.source(ListIterator(["mary had a little lamb", "its fleece was white as snow"]))

        # slow things down so checkpoints can be taken.
        lines = lines.filter(StatefulDelay(0.5)) 
        words = lines.flat_map(StatefulSplit())
        words.set_consistent(ConsistentRegionConfig.operator_driven(drain_timeout=40, reset_timeout=40, max_consecutive_attempts=3))
        tester = Tester(topo)
        self.assertFalse(tester.test(self.test_ctxtype, self.test_config, assert_on_fail=False))
    def test_source(self):

        topo = Topology()

        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        bop = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':30,'period':0.1})

        s = bop.stream
        s.set_consistent(ConsistentRegionConfig.operator_driven(drain_timeout=40, reset_timeout=40, max_consecutive_attempts=3))
         
        tester = Tester(topo)

        self.assertFalse(tester.test(self.test_ctxtype, self.test_config, assert_on_fail=False))
    def test_opdriven_map(self):
        topo = Topology()
        # Generate integers from [0,30)
        s = topo.source(TimeCounter(iterations=30, period=0.1))

        # Filter the odd ones 
        s = s.filter(StatefulEvenFilter())
        # Halve the even ones and add one.  Now have integers [1,15)
        s = s.map(StatefulHalfPlusOne())
        s.set_consistent(ConsistentRegionConfig.operator_driven(drain_timeout=40, reset_timeout=40, max_consecutive_attempts=3))
        s = s.last(10).trigger(2).aggregate(StatefulAverage())
        tester = Tester(topo)
        self.assertFalse(tester.test(self.test_ctxtype, self.test_config, assert_on_fail=False))
    def test_enter_exit(self):
        iterations = 3000
        reset_count = 5
        topo = Topology()
        s = topo.source(TimeCounter(iterations=iterations, period=0.01))
        s.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
        
        v = VerifyEnterExit(reset_count + 1, "VerifyEnterExit")
        tester = Tester(topo)
        tester.contents(s, range(0,iterations))
        tester.resets(reset_count)
        tester.add_condition(s, v)

        tester.test(self.test_ctxtype, self.test_config)
    def _test_insert_consistent_region(self):
        print('\n---------' + str(self))
        name = 'test_insert_consistent_region'
        topo = Topology(name)
        self._add_toolkits(topo, None)
        # configuration of consistent region trigger period
        trigger_period = 10
        num_expected_tuples = 8000
        num_resets = 2
        run_for = 120  # in seconds

        beacon = op.Source(topo,
                           "spl.utility::Beacon",
                           'tuple<int64 id, rstring val>',
                           params={
                               'period': 0.01,
                               'iterations': num_expected_tuples
                           })
        beacon.id = beacon.output('(int64)IterationCount()')
        beacon.val = beacon.output(spltypes.rstring('CR_TEST'))
        beacon.stream.set_consistent(
            ConsistentRegionConfig.periodic(trigger_period))

        es.insert(beacon.stream,
                  connection=self.connection,
                  database=self.database,
                  table='StreamsCRTable',
                  primary_key='id',
                  partitioning_key='id',
                  front_end_connection_flag=False,
                  user=self.es_user,
                  password=self.es_password,
                  truststore=self.es_truststore,
                  truststore_password=self.es_truststore_password,
                  keystore=self.es_keystore,
                  keystore_password=self.es_keystore_password)

        #self._build_only(name, topo)

        tester = Tester(topo)
        tester.run_for(run_for)
        tester.resets(num_resets)  # minimum number of resets for each region

        cfg = {}
        # change trace level
        job_config = streamsx.topology.context.JobConfig(tracing='warn')
        job_config.add(cfg)
        cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
        tester.test(self.test_ctxtype, cfg, always_collect_logs=True)
        print(str(tester.result))
    def test_for_each(self):
        iterations = 3000
        reset_count = 5
        topo = Topology()
        s = topo.source(TimeCounter(iterations=iterations, period=0.01))
        if self.is_cr():
            s.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))

        s.for_each(VerifyNumericOrder())

        tester = Tester(topo)
        tester.contents(s, range(0,iterations))
        if self.is_cr():
            tester.resets(reset_count)
        tester.test(self.test_ctxtype, self.test_config)
    def test_aggregate(self):
        iterations = 3000
        reset_count = 5
        topo = Topology()
        # Generate integers from [0,3000)
        s = topo.source(TimeCounter(iterations=iterations, period=0.01))
        if self.is_cr():
            s.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))

        # Filter the odd ones 
        s = s.filter(StatefulEvenFilter())
        # Halve the even ones and add one.  Now have integers [1,(iterations/2))
        s = s.map(StatefulHalfPlusOne())

        sc = s.last(10).trigger(3).aggregate(StatefulAverage())
        st = s.last(17).trigger(datetime.timedelta(seconds=2)).aggregate(StatefulAverage())

        # non-stateful aggregation functions
        nsc = s.last(19).trigger(13).aggregate(lambda tuples : sum(tuples))

        tester = Tester(topo)
        if self.is_cr():
            tester.resets(reset_count)

        # Find the expected results.
        # mimic the processing using Python builtins
        iv = filter(StatefulEvenFilter(), range(iterations))
        iv = list(map(StatefulHalfPlusOne(), iv))

        # Expected stateful averages sc,st
        sagg = StatefulAverage()
        ers = [ sagg(iv[0:i+3][-10:]) for i in range(0, 3*int(len(iv)/3), 3) ]
        tester.contents(sc, ers)

        tester.tuple_check(st, TimedStatefulAverageChecker())

        # Must eventually aggregate on the last 17 items in iv
        # but only if cr otherwise the final marker stops
        # the window before the final trigger
        if self.is_cr():
            tester.eventual_result(st, lambda av : True if av[1] == sagg(iv[-17:])[1] else None)

        # Expected non-stateful averages nsc
        ernsc = [ sum(iv[0:i+13][-19:]) for i in range(0, 13*int(len(iv)/13), 13) ]
        tester.contents(nsc, ernsc)

        tester.test(self.test_ctxtype, self.test_config)
    def test_filter_map(self):
        iterations = 3000
        topo = Topology()

        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        timeCounter = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':iterations,'period':0.01})
        timeCounter.stream.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
 
        evenFilter = op.Map("com.ibm.streamsx.topology.pytest.checkpoint::StatefulEvenFilter", timeCounter.stream, None, params={})
        hpo = op.Map("com.ibm.streamsx.topology.pytest.checkpoint::StatefulHalfPlusOne", evenFilter.stream, None, params={})
        s = hpo.stream
        tester = Tester(topo)
        tester.resets()
        tester.tuple_count(s, iterations/2)
        tester.contents(s, list(zip(range(1,int((iterations/2)+1)))))

        tester.test(self.test_ctxtype, self.test_config)
    def test_primitive_foreach(self):
        iterations=3000
        topo = Topology()

        topo.checkpoint_period = timedelta(seconds=1)
        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        timeCounter = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':iterations,'period':0.01})
        timeCounter.stream.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))

        fizzbuzz = op.Map("com.ibm.streamsx.topology.pytest.checkpoint::FizzBuzzPrimitive", timeCounter.stream, schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
        verify = op.Sink("com.ibm.streamsx.topology.pytest.checkpoint::Verify", fizzbuzz.stream)
        s = fizzbuzz.stream
        tester = Tester(topo)
        tester.resets()
        tester.tuple_count(s, iterations)

        tester.test(self.test_ctxtype, self.test_config)
    def test_aggregate_partitioned_stateful(self):
        iterations = 3000
        reset_count = 5
        topo = Topology()
        # Generate integers from [0,3000)
        s = topo.source(TimeCounter(iterations=iterations, period=0.01))
        if (self.is_cr()):
            s.set_consistent(
                ConsistentRegionConfig.periodic(5,
                                                drain_timeout=40,
                                                reset_timeout=40,
                                                max_consecutive_attempts=6))

        # Find the collatz (hotpo) sequence length
        s = s.map(lambda x: CollatzLength(x + 1))

        count = 10
        trigger = 3
        sc = s.last(count).trigger(trigger).partition(
            GreatestSoFar()).aggregate(max)

        #sc.print()
        #streamsx.topology.context.submit('TOOLKIT', topo)

        tester = Tester(topo)
        if (self.is_cr()):
            tester.resets(reset_count)

        # Find the expected results.
        # mimic the processing using Python builtins
        iv = list(map(lambda x: CollatzLength(x + 1), range(iterations)))

        def q(l):
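            # Mimic the partitioned window: GreatestSoFar routes each value to a
            # partition, and every `trigger` values within a partition we emit the
            # max of that partition's last `count` values.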
            partitions = [[], []]
            gsf = GreatestSoFar()
            for i in l:
                partition = gsf(i)
                partitions[partition].append(i)
                if len(partitions[partition]) % trigger == 0:
                    yield max(partitions[partition][-count:])

        tester.contents(sc, q(iv))
        tester.test(self.test_ctxtype, self.test_config)
    def test_source(self):
        iterations = 3000
        topo = Topology()

        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        bop = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':iterations,'period':0.01})

        s = bop.stream
        s.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))
        
        tester = Tester(topo)
        tester.resets()
        tester.tuple_count(s, iterations)
        tester.contents(s, list(zip(range(0,iterations))))

        # job_config = streamsx.topology.context.JobConfig(tracing='debug')
        # job_config.add(self.test_config)

        tester.test(self.test_ctxtype, self.test_config)
    def test_hash_adder(self):
        iterations=3000
        reset_count=5
        topo = Topology()
        s = topo.source(TimeCounter(iterations=iterations, period=0.01))
        if self.is_cr():
            s.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))

        width = 3
        s = s.parallel(width, Routing.HASH_PARTITIONED, StatefulStupidHash())
        s = s.map(lambda x: x + 23)
        s = s.end_parallel()

        expected = [v + 23 for v in range(iterations)]

        tester = Tester(topo)
        if self.is_cr():
            tester.resets(reset_count)
        tester.contents(s, expected, ordered=width==1)
        tester.test(self.test_ctxtype, self.test_config)
    def test_beacon(self):
        # An operator-driven consistent region can be used with a source
        # that supports it, such as Beacon
        iterations = 5000
        topo = Topology()

        beacon = op.Source(topo, "spl.utility::Beacon", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':iterations,'period':0.01,'triggerCount':streamsx.spl.types.uint32(500)})
        beacon.f = beacon.output('(int32)IterationCount()')

        s = beacon.stream
        s.set_consistent(ConsistentRegionConfig.operator_driven(drain_timeout=40, reset_timeout=40, max_consecutive_attempts=4))
         
        tester = Tester(topo)
        # For operator-driven regions, the resetter uses a random interval
        # from 10-40 seconds for resets.  Only one is likely to be completed
        # while processing tuples for this test.
        tester.resets(1)
        tester.tuple_count(s, iterations)
        tester.contents(s, list(zip(range(0,iterations))))

        tester.test(self.test_ctxtype, self.test_config)
    def test_map_foreach(self):
        iterations = 3000
        topo = Topology()

        streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy'))
        timeCounter = op.Source(topo, "com.ibm.streamsx.topology.pytest.checkpoint::TimeCounter", schema.StreamSchema('tuple<int32 f>').as_tuple(), params={'iterations':iterations,'period':0.01})
        timeCounter.stream.set_consistent(ConsistentRegionConfig.periodic(5 , drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))

        fizzbuzz = op.Map("com.ibm.streamsx.topology.pytest.checkpoint::FizzBuzzMap", timeCounter.stream, schema.StreamSchema('tuple<int32 f, rstring c>').as_tuple())
        verify = op.Sink("com.ibm.streamsx.topology.pytest.checkpoint::Verify", fizzbuzz.stream)
        s = fizzbuzz.stream

        tester = Tester(topo)
        tester.resets()
        tester.tuple_count(s, iterations)
        # Find the expected results.
        fizz=lambda x: (x[0], x[1]+'fizz' if x[0] % 3 == 0 else x[1])
        buzz=lambda x: (x[0], x[1]+'buzz' if x[0] % 5 == 0 else x[1])
        expected = list (map (buzz, (map (fizz, (map (lambda x: (x,''), range(iterations)))))))
        tester.contents(s, expected)

        tester.test(self.test_ctxtype, self.test_config)
    def test_aggregate_partitioned(self):
        iterations = 3000
        reset_count = 5
        topo = Topology()
        # Generate integers from [0,3000)
        s = topo.source(TimeCounter(iterations=iterations, period=0.01))
        if (self.is_cr()):
            s.set_consistent(
                ConsistentRegionConfig.periodic(5,
                                                drain_timeout=40,
                                                reset_timeout=40,
                                                max_consecutive_attempts=6))

        # Filter the odd ones
        s = s.filter(StatefulEvenFilter())
        # Halve the even ones and add one.  Now have integers [1,(iterations/2))
        s = s.map(StatefulHalfPlusOne())

        sc = s.last(10).trigger(3).partition(lambda x: x % 2).aggregate(
            StatefulAverage())

        tester = Tester(topo)
        if (self.is_cr()):
            tester.resets(reset_count)

        # Find the expected results.
        # mimic the processing using Python builtins
        iv = filter(StatefulEvenFilter(), range(iterations))
        iv = list(map(StatefulHalfPlusOne(), iv))

        # Expected stateful averages sc,st
        sagg = StatefulAverage()
        ers = [
            sagg(iv[i % 2:i + 5 - i % 2:2][-10:])
            for i in range(0, 3 * int(len(iv) / 3), 3)
        ]

        tester.contents(sc, ers)
        tester.test(self.test_ctxtype, self.test_config)
    def test_flat_map(self):
        count = 1000
        reset_count = 5
        topo = Topology()

        lines = topo.source(ListIterator(["All work","and no play","makes Jack","a dull boy"], period=0.01, count=count))

        if self.is_cr():
            lines.set_consistent(ConsistentRegionConfig.periodic(5, drain_timeout=40, reset_timeout=40, max_consecutive_attempts=6))

        words = lines.flat_map(StatefulSplit())
        tester = Tester(topo)
        if self.is_cr():
            tester.resets(reset_count)

        # Find the expected results.
        flat_contents = ["All","work","and","no","play","makes","Jack","a","dull","boy"]
        # repeat the contents 1000 times.
        expected = []
        for i in range(count):
            expected.extend(flat_contents)
        tester.contents(words, expected)
        tester.test(self.test_ctxtype, self.test_config)
    def test_op_driven_drain_timeout(self):
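        # negative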
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.operator_driven(drain_timeout=-1)

        # timedelta, positive
        config = ConsistentRegionConfig.operator_driven(drain_timeout=timedelta(seconds=1))
        self.assertEqual(config.drain_timeout, timedelta(seconds=1))

        # timedelta, zero
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.operator_driven(drain_timeout=timedelta(seconds=0))

        # timedelta, negative
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.operator_driven(drain_timeout=timedelta(seconds=-1))

        # can cast to float
        config = ConsistentRegionConfig.operator_driven(drain_timeout="8.2")
        self.assertEqual(config.drain_timeout, "8.2")

        # cannot cast to float
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig.operator_driven(drain_timeout="clogged")
    def test_op_driven_simple_attempts(self):
        config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=14)
        self.assertEqual(config.trigger, ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN)
        self.assertEqual(config.drain_timeout, self._DEFAULT_DRAIN_TIMEOUT)
        self.assertEqual(config.reset_timeout, self._DEFAULT_RESET_TIMEOUT)
        self.assertEqual(config.max_consecutive_attempts, 14)
    def test_op_driven_all(self):
        config = ConsistentRegionConfig.operator_driven(max_consecutive_attempts=14, drain_timeout=1, reset_timeout=2)
        self.assertEqual(config.trigger, ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN)
        self.assertEqual(config.drain_timeout, 1)
        self.assertEqual(config.reset_timeout, 2)
        self.assertEqual(config.max_consecutive_attempts, 14)
    def add_stateful(self, topo, streams):
        # Make each stream the start of a periodic consistent region with a 0.5 second period.
        for s in streams:
            s.set_consistent(ConsistentRegionConfig.periodic(0.5))
    def test_no_trigger(self):
        # A trigger is required; constructing a config without one must raise.
        with self.assertRaises(ValueError):
            config = ConsistentRegionConfig()