def _test_insert_consistent_region(self):
    """Insert from a Beacon source running inside a periodic consistent region.

    Disabled by default (leading underscore). The Tester drives the
    consistent-region reset protocol and checks the job survives resets.
    """
    print('\n---------' + str(self))
    job_name = 'test_insert_consistent_region'
    topology = Topology(job_name)
    self._add_toolkits(topology, None)
    # Consistent-region tuning: drain every 10 s, emit 8000 tuples,
    # require at least 2 resets, run the job for up to 120 seconds.
    drain_period = 10
    expected_tuples = 8000
    min_resets = 2
    runtime_seconds = 120
    source = op.Source(
        topology, "spl.utility::Beacon",
        'tuple<int64 id, rstring val>',
        params={'period': 0.01, 'iterations': expected_tuples})
    source.id = source.output('(int64)IterationCount()')
    source.val = source.output(spltypes.rstring('CR_TEST'))
    source.stream.set_consistent(
        ConsistentRegionConfig.periodic(drain_period))
    es.insert(
        source.stream,
        connection=self.connection,
        database=self.database,
        table='StreamsCRTable',
        primary_key='id',
        partitioning_key='id',
        front_end_connection_flag=False,
        user=self.es_user,
        password=self.es_password,
        truststore=self.es_truststore,
        truststore_password=self.es_truststore_password,
        keystore=self.es_keystore,
        keystore_password=self.es_keystore_password)
    verifier = Tester(topology)
    verifier.run_for(runtime_seconds)
    verifier.resets(min_resets)  # minimum number of resets for each region
    cfg = {}
    # Lower the trace level to warnings only for this long-running job.
    streamsx.topology.context.JobConfig(tracing='warn').add(cfg)
    cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
    verifier.test(self.test_ctxtype, cfg, always_collect_logs=True)
    print(str(verifier.result))
def test_insert_with_app_config(self):
    """Insert via the 'eventstore' application configuration and verify exactly 20 tuples."""
    print('\n---------' + str(self))
    self._create_app_config()
    topology = Topology('test_insert_with_app_config')
    self._add_toolkits(topology, None)
    topology.add_pip_package('streamsx.eventstore')
    source = self._create_stream(topology)
    out_schema = StreamSchema(
        'tuple<int32 id, rstring name, boolean _Inserted_>')
    inserted = es.insert(
        source,
        config='eventstore',
        table='SampleTable',
        schema=out_schema,
        primary_key='id',
        partitioning_key='id',
        front_end_connection_flag=self.front_end_connection_flag,
        truststore=self.es_truststore,
        keystore=self.es_keystore)
    inserted.print()
    verifier = Tester(topology)
    verifier.run_for(120)
    verifier.tuple_count(inserted, 20, exact=True)
    cfg = {}
    # Raise tracing to info and disable SSL verification for the test run.
    streamsx.topology.context.JobConfig(tracing='info').add(cfg)
    cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
    verifier.test(self.test_ctxtype, cfg, always_collect_logs=True)
    print(str(verifier.result))
def test_insert(self):
    """Compose a topology that inserts into 'SampleTablePy' and build it.

    Build-only check: the topology is compiled via ``self._build_only``
    but never submitted or run.
    """
    print('\n---------' + str(self))
    name = 'test_insert'
    topo = Topology(name)
    streamsx.spl.toolkit.add_toolkit(topo, self.es_toolkit)
    s = self._create_stream(topo)
    # The result stream is not needed for a build-only check;
    # es.insert registers the sink on the topology as a side effect,
    # so the previously-unused `res` local has been dropped.
    es.insert(s, config='eventstore', table='SampleTablePy',
              primary_key='id', ssl_connection=False, plugin_flag=False)
    # build only
    self._build_only(name, topo)
def test_insert_udp(self):
    """Insert through a 2-channel parallel region (two EventStoreSink operators)."""
    print('\n---------' + str(self))
    topology = Topology('test_insert_udp')
    self._add_toolkits(topology, None)
    topology.add_pip_package('streamsx.eventstore')
    source = self._create_stream(topology)
    out_schema = StreamSchema(
        'tuple<int32 id, rstring name, boolean _Inserted_>')
    # user-defined parallelism with two channels (two EventStoreSink operators)
    inserted = es.insert(
        source.parallel(2),
        table='SampleTable',
        database=self.database,
        connection=self.connection,
        schema=out_schema,
        primary_key='id',
        partitioning_key='id',
        front_end_connection_flag=self.front_end_connection_flag,
        user=self.es_user,
        password=self.es_password,
        truststore=self.es_truststore,
        truststore_password=self.es_truststore_password,
        keystore=self.es_keystore,
        keystore_password=self.es_keystore_password).end_parallel()
    inserted.print()
    verifier = Tester(topology)
    verifier.run_for(120)
    verifier.tuple_count(inserted, 20, exact=True)
    cfg = {}
    # Raise tracing to info and disable SSL verification for the test run.
    streamsx.topology.context.JobConfig(tracing='info').add(cfg)
    cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
    verifier.test(self.test_ctxtype, cfg, always_collect_logs=True)
    print(str(verifier.result))
def test_param(self):
    """Exercise es.insert with various optional-parameter combinations (composition only, no run)."""
    topo = Topology()
    stream = topo.source(['Hello World']).as_string()
    # Common arguments shared by every call.
    base = {'connection': '9.26.150.75:1101',
            'database': 'sample_db',
            'table': 'sample_table'}
    batched = dict(base, batch_size=100, max_num_active_batches=5)
    # Same seven calls as before, in the same order, expressed as variants.
    variants = (
        base,
        batched,
        dict(batched, front_end_connection_flag=True),
        dict(batched, plugin_flag=True),
        dict(batched, plugin_flag=False),
        dict(batched, plugin_flag='false'),
        dict(batched, ssl_connection=False),
    )
    for kwargs in variants:
        es.insert(stream, **kwargs)