def test_max_item_size(self):
    """
    Test
    :return:
    """
    # Alloc
    self.redis_cache = RedisCache(max_single_item_bytes=2)

    # Put
    self.assertTrue(self.redis_cache.put(self.key_prefix + "keyA", b"aa", 60000))
    self.assertFalse(self.redis_cache.put(self.key_prefix + "keyB", b"aaa", 60000))

    logger.info("ms cur=%s", SolBase.mscurrent())
    logger.info("A : %s", self.redis_cache.get(self.key_prefix + "keyA"))
    logger.info("B : %s", self.redis_cache.get(self.key_prefix + "keyB"))

    # A : must be present
    # B : must be out of cache
    self.assertEqual(self.redis_cache.get(self.key_prefix + "keyA"), b"aa")
    self.assertIsNone(self.redis_cache.get(self.key_prefix + "keyB"))

    self.assertEqual(Meters.aig("rcs.cache_put"), 1)
    self.assertEqual(Meters.aig("rcs.cache_get_hit"), 2)
    self.assertEqual(Meters.aig("rcs.cache_get_miss"), 2)

    # Stop
    self.redis_cache.stop_cache()
    self.redis_cache = None
def test_basic_ttl(self):
    """
    Test
    :return:
    """
    # Alloc
    self.redis_cache = RedisCache()

    # Put
    self.redis_cache.put(self.key_prefix + "keyA", b"valA", 60000)
    self.redis_cache.put(self.key_prefix + "keyB", b"valB", 1000)

    logger.info("ms cur=%s", SolBase.mscurrent())
    logger.info("A : %s", self.redis_cache.get(self.key_prefix + "keyA"))
    logger.info("B : %s", self.redis_cache.get(self.key_prefix + "keyB"))

    # Wait a bit
    SolBase.sleep(2000)
    logger.info("ms after sleep=%s", SolBase.mscurrent())

    # A : must be present
    # B : must be evicted (TTL elapsed)
    self.assertEqual(self.redis_cache.get(self.key_prefix + "keyA"), b"valA")
    self.assertIsNone(self.redis_cache.get(self.key_prefix + "keyB"))

    self.assertEqual(Meters.aig("rcs.cache_put"), 2)
    self.assertEqual(Meters.aig("rcs.cache_get_hit"), 3)
    self.assertEqual(Meters.aig("rcs.cache_get_miss"), 1)

    # Stop
    self.redis_cache.stop_cache()
    self.redis_cache = None
def __str__(self):
    """
    Str override
    :return: str
    :rtype: str
    """
    return "id={0}*put/bypass/hit/miss={1}/{2}/{3}/{4}*ex={5}".format(
        id(self),
        Meters.aig(self.meters_prefix + "rcs.cache_put"),
        Meters.aig(self.meters_prefix + "rcs.cache_put_too_big"),
        Meters.aig(self.meters_prefix + "rcs.cache_get_hit"),
        Meters.aig(self.meters_prefix + "rcs.cache_get_miss"),
        Meters.aig(self.meters_prefix + "rcs.cache_ex"),
    )
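# A minimal logging sketch (not part of the original sources): because __str__ above aggregates
# the "rcs.*" meters, a RedisCache instance can be passed straight to the logger to expose its
# put/bypass/hit/miss/ex counters, as the bench loops below do with "cache=%s"; "redis_cache"
# here stands for an assumed, already-allocated instance.
def _log_redis_cache_stats_sketch(redis_cache):
    # Emits something like "cache=id=...*put/bypass/hit/miss=.../.../.../...*ex=..."
    logger.info("cache=%s", redis_cache)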
def test_basic_eviction_with_get_ttl(self):
    """
    Test
    :return:
    """
    # Alloc
    self.mem_cache = MemoryCache(cb_evict=self.eviction_callback)

    # Put
    self.mem_cache.put("keyA", b"valA", 60000)
    self.mem_cache.put("keyB", b"valB", 500)

    logger.info("ms cur=%s", SolBase.mscurrent())
    logger.info("A : %s", self.mem_cache.get_raw("keyA"))
    logger.info("B : %s", self.mem_cache.get_raw("keyB"))

    # Wait a bit
    SolBase.sleep(600)
    logger.info("ms after sleep=%s", SolBase.mscurrent())

    # A : must be present
    # B : must be evicted (TTL elapsed)
    self.assertEqual(self.mem_cache.get("keyA"), b"valA")
    self.assertIsNone(self.mem_cache.get("keyB"))

    self.assertEqual(self.evict_count, 1)
    self.assertEqual(self.evict_last_key, "keyB")
    self.assertEqual(self.evict_last_value, b"valB")

    self.assertEqual(Meters.aig("mcs.cache_evict_ttl_get"), 1)

    # Stop
    self.mem_cache.stop_cache()
    self.mem_cache = None
def test_basic_eviction_max_capacity_lru(self):
    """
    Test
    :return:
    """
    # Alloc
    self.mem_cache = MemoryCache(max_item=3, cb_evict=self.eviction_callback)

    # Put
    self.mem_cache.put("keyA", b"valA", 60000)
    self.mem_cache.put("keyB", b"valB", 60000)
    self.mem_cache.put("keyC", b"valC", 60000)

    # Use A and C => B becomes the least recently used
    self.mem_cache.get("keyA")
    self.mem_cache.get("keyC")

    # We are maxed (3 items)
    # Add D => B must be kicked
    self.mem_cache.put("keyD", b"valD", 60000)

    self.assertEqual(self.mem_cache.get("keyA"), b"valA")
    self.assertEqual(self.mem_cache.get("keyC"), b"valC")
    self.assertEqual(self.mem_cache.get("keyD"), b"valD")
    self.assertIsNone(self.mem_cache.get("keyB"))

    self.assertEqual(self.evict_count, 1)
    self.assertEqual(self.evict_last_key, "keyB")
    self.assertEqual(self.evict_last_value, b"valB")

    self.assertEqual(Meters.aig("mcs.cache_evict_lru_put"), 1)

    # Stop
    self.mem_cache.stop_cache()
    self.mem_cache = None
def test_basic_eviction_with_watchdog_ttl(self):
    """
    Test
    :return:
    """
    # Go
    self.mem_cache = MemoryCache(watchdog_interval_ms=1000,
                                 cb_watchdog=self.watchdog_callback,
                                 cb_evict=self.eviction_callback)

    # Put
    self.mem_cache.put("keyA", b"valA", 60000)
    self.mem_cache.put("keyB", b"valB", 500)
    self.mem_cache.put("keyC", b"valC", 500)
    self.mem_cache.put("keyD", b"valD", 60000)

    logger.info("ms cur=%s", SolBase.mscurrent())
    logger.info("A : %s", self.mem_cache.get_raw("keyA"))
    logger.info("B : %s", self.mem_cache.get_raw("keyB"))
    logger.info("C : %s", self.mem_cache.get_raw("keyC"))
    logger.info("D : %s", self.mem_cache.get_raw("keyD"))

    # Wait a bit
    ms_start = SolBase.mscurrent()
    while SolBase.msdiff(ms_start) < (1000.0 * 2.0):
        if self.callback_call > 0:
            break
        else:
            SolBase.sleep(10)
    logger.info("ms after wait=%s", SolBase.mscurrent())

    logger.info("_hash_key = %s", self.mem_cache._hash_key)
    logger.info("_hash_context = %s", self.mem_cache._hash_context)

    # A, D : must be present
    # B, C : must be evicted (TTL elapsed, by watchdog)
    self.assertEqual(self.callback_call, 1)
    self.assertEqual(self.callback_evicted, 2)

    self.assertFalse("valB" in self.mem_cache._hash_key)
    self.assertFalse("valB" in self.mem_cache._hash_context)
    self.assertFalse("valC" in self.mem_cache._hash_key)
    self.assertFalse("valC" in self.mem_cache._hash_context)

    self.assertIsNone(self.mem_cache.get("keyB"))
    self.assertIsNone(self.mem_cache.get("keyC"))
    self.assertEqual(self.mem_cache.get("keyA"), b"valA")
    self.assertEqual(self.mem_cache.get("keyD"), b"valD")

    self.assertEqual(self.evict_count, 2)
    self.assertTrue(self.evict_last_key == "keyB" or self.evict_last_key == "keyC")
    self.assertTrue(self.evict_last_value == b"valB" or self.evict_last_value == b"valC")

    self.assertEqual(Meters.aig("mcs.cache_evict_ttl_watchdog"), 2)

    # Stop
    self.mem_cache.stop_cache()
    self.mem_cache = None
def test_pool_basic_bad_db(self):
    """
    Test pool, basic
    """
    MysqlApi.reset_pools()
    Meters.reset()

    d_conf = {
        "hosts": ["localhost", "127.0.0.1"],
        "port": 3306,
        "database": "no_db",
        "user": "******",
        "password": "******",
        "autocommit": True,
    }

    try:
        MysqlApi.exec_1(d_conf, "SELECT user, host FROM no_db.user LIMIT 1;")
    except Exception as e:
        logger.debug("Expected ex=%s", SolBase.extostr(e))

    # Check it
    self.assertEqual(Meters.aig("k.db_pool.hash.cur"), 1)
    self.assertEqual(Meters.aig("k.db_pool.base.cur_size"), 0)
    self.assertEqual(Meters.aig("k.db_pool.base.call.connection_acquire"), 1)
    self.assertEqual(Meters.aig("k.db_pool.base.call.connection_release"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call.__init"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._connection_create"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._get_connection"), 2)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._connection_close"), 2)
    self.assertEqual(Meters.aig("k.db_pool.mysql.hosts.deactivate_one"), 2)
    self.assertEqual(Meters.aig("k.db_pool.mysql.hosts.all_down"), 1)
def test_bench_greenlet_100_100_sleep(self):
    """
    Test
    :return:
    """
    self._go_greenlet(greenlet_count=100, pool_max=100, sql="SELECT SLEEP(1);")
    self.assertEqual(Meters.aig("k.db_pool.base.pool_maxed"), 0)
def __str__(self):
    """
    Str override
    :return: str
    :rtype: str
    """
    return "id={0}*hash={1}/{2}*put/bypass/hit/miss={3}/{4}/{5}/{6}*evict.w/g/lru/lrusize={7}/{8}/{9}/{10}*ex={11}/{12}*bytes={13}".format(
        id(self),
        len(self._hash_key),
        len(self._hash_context),
        Meters.aig(self.meters_prefix + "mcs.cache_put"),
        Meters.aig(self.meters_prefix + "mcs.cache_put_too_big"),
        Meters.aig(self.meters_prefix + "mcs.cache_get_hit"),
        Meters.aig(self.meters_prefix + "mcs.cache_get_miss"),
        Meters.aig(self.meters_prefix + "mcs.cache_evict_ttl_watchdog"),
        Meters.aig(self.meters_prefix + "mcs.cache_evict_ttl_get"),
        Meters.aig(self.meters_prefix + "mcs.cache_evict_lru_put"),
        Meters.aig(self.meters_prefix + "mcs.cache_evict_lru_size_put"),
        Meters.aig(self.meters_prefix + "mcs.cache_ex"),
        Meters.aig(self.meters_prefix + "mcs.cache_purge_failed"),
        self._current_data_bytes.get())
def test_bench_greenlet_100_10_maxed_sleep(self):
    """
    Test
    :return:
    """
    self._go_greenlet(greenlet_count=100, pool_max=10, sql="SELECT SLEEP(1);", check_exception=False)

    # Must have some maxed
    self.assertGreater(Meters.aig("k.db_pool.base.pool_maxed"), 0)
def test_pool_basic_x2(self):
    """
    Test pool, basic
    """
    MysqlApi.reset_pools()
    Meters.reset()

    d_conf = {
        "hosts": ["localhost", "127.0.0.1"],
        "port": 3306,
        "database": None,
        "user": "******",
        "password": "******",
        "autocommit": True,
    }
    for _ in range(0, 10):
        MysqlApi.exec_1(d_conf, "SELECT user, host FROM mysql.user LIMIT 1;")

    d_conf = {
        "hosts": ["localhost", "localhost"],
        "port": 3306,
        "database": None,
        "user": "******",
        "password": "******",
        "autocommit": True,
    }
    for _ in range(0, 10):
        MysqlApi.exec_1(d_conf, "SELECT user, host FROM mysql.user LIMIT 1;")

    # Check it
    self.assertEqual(Meters.aig("k.db_pool.hash.cur"), 1 * 2)
    self.assertEqual(Meters.aig("k.db_pool.base.cur_size"), 1 * 2)
    self.assertEqual(Meters.aig("k.db_pool.base.call.connection_acquire"), 10 * 2)
    self.assertEqual(Meters.aig("k.db_pool.base.call.connection_release"), 10 * 2)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call.__init"), 1 * 2)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._connection_create"), 1 * 2)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._get_connection"), 1 * 2)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._connection_ping"), 10 * 2)
def close_all(self):
    """
    Close all connections
    """
    n = 0
    # Drain the pool and close every pooled connection
    while not self.pool.empty():
        conn = self.pool.get_nowait()
        self._connection_close(conn)
        n += 1

    # Update meters : current size decreases by the number of closed connections
    Meters.aii("k.db_pool.base.cur_size", increment_value=-n)
    Meters.ai("k.db_pool.base.max_size").set(
        max(Meters.aig("k.db_pool.base.max_size"), Meters.aig("k.db_pool.base.cur_size")))

    self.size = 0
def test_pool_basic_host_multi(self):
    """
    Test pool, basic
    """
    MysqlApi.reset_pools()
    Meters.reset()

    d_conf = {
        "host": "localhost,127.0.0.1",
        "port": 3306,
        "database": None,
        "user": "******",
        "password": "******",
        "autocommit": True,
    }
    for _ in range(0, 10):
        MysqlApi.exec_1(d_conf, "SELECT user, host FROM mysql.user LIMIT 1;")

    # Check it
    self.assertEqual(Meters.aig("k.db_pool.hash.cur"), 1)
    self.assertEqual(Meters.aig("k.db_pool.base.cur_size"), 1)
    self.assertEqual(Meters.aig("k.db_pool.base.call.connection_acquire"), 10)
    self.assertEqual(Meters.aig("k.db_pool.base.call.connection_release"), 10)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call.__init"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._connection_create"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._get_connection"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._connection_ping"), 10)

    s_hash = MysqlApi._get_pool_hash(d_conf)
    self.assertEqual(len(MysqlApi.D_POOL_INSTANCES[s_hash].host_status), 2)
    self.assertIn("localhost", MysqlApi.D_POOL_INSTANCES[s_hash].host_status)
    self.assertIn("127.0.0.1", MysqlApi.D_POOL_INSTANCES[s_hash].host_status)
def connection_acquire(self):
    """
    Get a connection
    # TODO : In case client cannot release (greenlet kill) : add a spawn_later to protect against pool exhaust (+ kill the connection in this case)
    # TODO : this requires a timeout by config (lets say 60 sec by default)
    :return: object
    :rtype: object
    """
    with self.pool_lock:
        Meters.aii("k.db_pool.base.call.connection_acquire")
        if self.pool.qsize() > 0:
            # ------------------------------
            # GET CONNECTION FROM POOL
            # ------------------------------
            conn = self.pool.get()

            # Ping it
            if not self._connection_ping(conn):
                # Failed => close it
                self._connection_close(conn)
                # Re-create a new one (we just closed a connection)
                conn = self._connection_create()

            # Send it back
            return conn
        elif self.size >= self.max_size:
            # ------------------------------
            # POOL MAXED => ERROR
            # ------------------------------
            Meters.aii("k.db_pool.base.pool_maxed")
            raise Exception("Pool maxed, size=%s, max_size=%s" % (self.size, self.max_size))
        else:
            # ------------------------------
            # POOL NOT MAXED, NO CONNECTION IN POOL => NEW CONNECTION
            # ------------------------------
            conn = self._connection_create()
            self.size += 1
            Meters.aii("k.db_pool.base.cur_size", increment_value=1)
            Meters.ai("k.db_pool.base.max_size").set(
                max(Meters.aig("k.db_pool.base.max_size"), Meters.aig("k.db_pool.base.cur_size")))
            return conn
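# A minimal caller-side sketch (not part of the original sources) of the acquire/release contract
# implied above. The connection_release(conn) method is an assumption inferred from the
# "k.db_pool.base.call.connection_release" meter checked in the tests; the try/finally pairing is
# what the pool_maxed error path requires to avoid leaking pool slots.
def _pooled_query_sketch(pool):
    conn = pool.connection_acquire()
    try:
        # Use the connection here (hypothetical caller code)
        pass
    finally:
        # Always hand the connection back, even on exception, so the pool does not exhaust
        pool.connection_release(conn)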
def test_pool_basic_err(self):
    """
    Test pool, basic
    """
    MysqlApi.reset_pools()
    Meters.reset()

    d_conf = {
        "hosts": ["localhost", "127.0.0.1"],
        "port": 3306,
        "database": None,
        "user": "******",
        "password": "******",
        "autocommit": True,
    }
    for _ in range(0, 10):
        MysqlApi.exec_1(d_conf, "SELECT user, host FROM mysql.user LIMIT 1;")

    # Error
    try:
        MysqlApi.exec_1(d_conf, "SELECT zzz FROM mysql.no_table;")
    except ProgrammingError as e:
        logger.debug("Expected ex=%s", SolBase.extostr(e))

    # Check it
    self.assertEqual(Meters.aig("k.db_pool.hash.cur"), 1)
    self.assertEqual(Meters.aig("k.db_pool.base.cur_size"), 1)
    self.assertEqual(Meters.aig("k.db_pool.base.call.connection_acquire"), 11)
    self.assertEqual(Meters.aig("k.db_pool.base.call.connection_release"), 11)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call.__init"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._connection_create"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._get_connection"), 1)
    self.assertEqual(Meters.aig("k.db_pool.mysql.call._connection_ping"), 11)
def test_meters(self):
    """
    Test
    """
    ai1a = Meters.ai("ai1")
    self.assertIsInstance(ai1a, AtomicIntSafe)
    ai1b = Meters.ai("ai1")
    self.assertEqual(id(ai1a), id(ai1b))
    ai1a = Meters.aii("ai1")
    self.assertEqual(ai1a.get(), 1)
    ai1a = Meters.aii("ai1", 2)
    self.assertEqual(ai1a.get(), 3)
    self.assertEqual(ai1a.get(), Meters.aig("ai1"))

    af1a = Meters.af("af1")
    self.assertIsInstance(af1a, AtomicFloatSafe)
    af1b = Meters.af("af1")
    self.assertEqual(id(af1a), id(af1b))
    af1a = Meters.afi("af1")
    self.assertEqual(af1a.get(), 1.0)
    af1a = Meters.afi("af1", 2.0)
    self.assertEqual(af1a.get(), 3.0)
    self.assertEqual(af1a.get(), Meters.afg("af1"))

    dtc1a = Meters.dtc("dtc1")
    self.assertIsInstance(dtc1a, DelayToCountSafe)
    dtc1b = Meters.dtc("dtc1")
    self.assertEqual(id(dtc1a), id(dtc1b))
    Meters.dtci("dtc1", 0)
    Meters.dtci("dtc1", 50)
    Meters.dtci("dtc1", 100)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[0].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[50].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[100].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[500].get(), 0)
    Meters.dtc("dtc1").to_dict()

    # Write
    Meters.write_to_logger()
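# A minimal application-side sketch (not from the original suite) of the same Meters API the test
# above exercises: aii() increments a named integer counter, dtci() feeds a delay-to-count
# histogram, aig() reads the current value. The "app.*" meter names are illustrative only.
def _process_one_request_sketch():
    ms_start = SolBase.mscurrent()
    Meters.aii("app.request_count")                                 # integer counter, +1
    # ... handle the request here ...
    Meters.dtci("app.request_dtc", int(SolBase.msdiff(ms_start)))   # record elapsed ms in DTC buckets
    logger.info("requests so far=%s", Meters.aig("app.request_count"))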
def test_meters_to_udp(self):
    """
    Test
    """
    ai1a = Meters.ai("ai1")
    self.assertIsInstance(ai1a, AtomicIntSafe)
    ai1b = Meters.ai("ai1")
    self.assertEqual(id(ai1a), id(ai1b))
    ai1a = Meters.aii("ai1")
    self.assertEqual(ai1a.get(), 1)
    ai1a = Meters.aii("ai1", 2)
    self.assertEqual(ai1a.get(), 3)
    self.assertEqual(ai1a.get(), Meters.aig("ai1"))

    af1a = Meters.af("af1")
    self.assertIsInstance(af1a, AtomicFloatSafe)
    af1b = Meters.af("af1")
    self.assertEqual(id(af1a), id(af1b))
    af1a = Meters.afi("af1")
    self.assertEqual(af1a.get(), 1.0)
    af1a = Meters.afi("af1", 2.0)
    self.assertEqual(af1a.get(), 3.0)
    self.assertEqual(af1a.get(), Meters.afg("af1"))

    dtc1a = Meters.dtc("dtc1")
    self.assertIsInstance(dtc1a, DelayToCountSafe)
    dtc1b = Meters.dtc("dtc1")
    self.assertEqual(id(dtc1a), id(dtc1b))
    Meters.dtci("dtc1", 0)
    Meters.dtci("dtc1", 50)
    Meters.dtci("dtc1", 100)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[0].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[50].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[100].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[500].get(), 0)

    # Write
    Meters.write_to_logger()

    # Serialize
    ar_json = Meters.meters_to_udp_format(send_pid=True, send_dtc=True)
    logger.info("Got ar_json=%s", ar_json)
    for cur_ar in ar_json:
        logger.info("Got cur_ar=%s", cur_ar)

    # Serialize, no dtc
    ar_json = Meters.meters_to_udp_format(send_pid=True, send_dtc=False)
    logger.info("Got ar_json=%s", ar_json)
    for cur_ar in ar_json:
        logger.info("Got cur_ar=%s", cur_ar)

    # Send to daemon (assuming it is up locally)
    Meters.send_udp_to_knockdaemon()
    Meters.send_udp_to_knockdaemon(send_dtc=True)
    Meters.send_udp_to_knockdaemon(send_dtc=False)

    # ------------------------
    # UDP Scheduler test
    # ------------------------

    # Check
    self.assertIsNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertFalse(Meters.UDP_SCHEDULER_STARTED)

    # Start
    Meters.udp_scheduler_start(send_interval_ms=500)

    # Check
    self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertTrue(Meters.UDP_SCHEDULER_STARTED)

    # Start again
    Meters.udp_scheduler_start(send_interval_ms=500)

    # Check again
    self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertTrue(Meters.UDP_SCHEDULER_STARTED)

    # Interval is 500 => we sleep 3.250 sec, we assume we must have at least 500, 1000, 1500, 2000, 2500, 3000 run => 6 runs
    SolBase.sleep(3250)

    # Check
    self.assertGreaterEqual(Meters.aig("k.meters.udp.run.ok"), 6)
    self.assertEqual(Meters.aig("k.meters.udp.run.ex"), 0)
    self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertTrue(Meters.UDP_SCHEDULER_STARTED)

    # We stop
    Meters.udp_scheduler_stop()
    self.assertIsNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertFalse(Meters.UDP_SCHEDULER_STARTED)

    # Sleep again and check no more running
    cur_run = Meters.aig("k.meters.udp.run.ok")
    SolBase.sleep(2000)
    self.assertEqual(cur_run, Meters.aig("k.meters.udp.run.ok"))
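# A minimal service-lifecycle sketch (not from the original suite) of the UDP push API exercised
# above: start the periodic scheduler once at service startup and stop it on shutdown. The
# 10000 ms interval is an arbitrary illustrative value.
def _service_main_sketch():
    Meters.udp_scheduler_start(send_interval_ms=10000)   # push meters to knockdaemon every 10 s
    try:
        # ... run the service ...
        pass
    finally:
        Meters.udp_scheduler_stop()                      # stop the background greenlet on shutdown
        Meters.send_udp_to_knockdaemon()                 # optional final push before exit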
def _go_greenlet(self, greenlet_count, put_count, get_count, bench_item_count):
    """
    Doc
    :param greenlet_count: greenlet_count
    :param put_count: put_count
    :param get_count: get_count
    :param bench_item_count: bench_item_count
    """
    g_event = None
    g_array = None
    try:
        # Settings
        g_count = greenlet_count
        g_ms = 10000

        # Continue callback loop
        self.callback_return = True

        # Go
        self.redis_cache = RedisCache()

        # Item count
        self.bench_item_count = bench_item_count
        self.bench_put_weight = put_count
        self.bench_get_weight = get_count
        self.bench_ttl_min_ms = 1000
        self.bench_ttl_max_ms = int(g_ms / 2)

        # Go
        self.run_event = Event()
        self.exception_raised = 0
        self.open_count = 0
        self.thread_running = AtomicIntSafe()
        self.thread_running_ok = AtomicIntSafe()

        # Item per greenlet (integer, used as a key-range bound)
        item_per_greenlet = self.bench_item_count // g_count

        # Signal
        self.gorun_event = Event()

        # Alloc greenlet
        g_array = list()
        g_event = list()
        for _ in range(0, g_count):
            greenlet = Greenlet()
            g_array.append(greenlet)
            g_event.append(Event())

        # Run them
        cur_idx = 0
        for idx in range(0, len(g_array)):
            greenlet = g_array[idx]
            event = g_event[idx]
            greenlet.spawn(self._run_cache_bench, event, cur_idx, cur_idx + item_per_greenlet)
            cur_idx += item_per_greenlet
            SolBase.sleep(0)

        # Signal
        self.gorun_event.set()

        # Wait a bit
        dt = SolBase.mscurrent()
        while SolBase.msdiff(dt) < g_ms:
            SolBase.sleep(500)

            # Stat
            ms = SolBase.msdiff(dt)
            sec = float(ms / 1000.0)
            total_put = Meters.aig("rcs.cache_put")
            per_sec_put = round(float(total_put) / sec, 2)
            total_get = Meters.aig("rcs.cache_get_hit") + Meters.aig("rcs.cache_get_miss")
            per_sec_get = round(float(total_get) / sec, 2)

            logger.info(
                "Running..., count=%s, run=%s, ok=%s, put/sec=%s get/sec=%s, cache=%s",
                self.open_count,
                self.thread_running.get(), self.thread_running_ok.get(),
                per_sec_put, per_sec_get,
                self.redis_cache)
            self.assertEqual(self.exception_raised, 0)

        # Over, signal
        logger.info("Signaling, count=%s", self.open_count)
        self.run_event.set()

        # Wait
        for g in g_event:
            g.wait(30.0)
            self.assertTrue(g.isSet())

        g_event = None
        g_array = None

        # Log
        Meters.write_to_logger()
    finally:
        self.run_event.set()
        if g_event:
            for g in g_event:
                g.set()
        if g_array:
            for g in g_array:
                g.kill()
        if self.redis_cache:
            self.redis_cache.stop_cache()
            self.redis_cache = None
def _go_greenlet(self, greenlet_count, put_count, get_count, bench_item_count,
                 watchdog_interval_ms=60000,
                 max_item=128000,
                 max_bytes=32 * 1024 * 1024,
                 max_single_item_bytes=1 * 1024 * 1024,
                 purge_min_bytes=8 * 1024 * 1024,
                 purge_min_count=1000):
    """
    Doc
    :param greenlet_count: greenlet_count
    :param put_count: put_count
    :param get_count: get_count
    :param bench_item_count: bench_item_count
    :param watchdog_interval_ms: watchdog_interval_ms
    :param max_item: max_item
    :param max_bytes: max_bytes
    :param max_single_item_bytes: max_single_item_bytes
    :param purge_min_bytes: purge_min_bytes
    :param purge_min_count: purge_min_count
    """
    g_event = None
    g_array = None
    try:
        # Settings
        g_count = greenlet_count
        g_ms = 10000

        # Continue callback loop
        self.callback_return = True

        # Go
        self.mem_cache = MemoryCache(
            watchdog_interval_ms=watchdog_interval_ms,
            max_item=max_item,
            max_bytes=max_bytes,
            max_single_item_bytes=max_single_item_bytes,
            purge_min_bytes=purge_min_bytes,
            purge_min_count=purge_min_count)

        # Item count
        self.bench_item_count = bench_item_count
        self.bench_put_weight = put_count
        self.bench_get_weight = get_count
        self.bench_ttl_min_ms = 1000
        self.bench_ttl_max_ms = int(g_ms / 2)

        # Go
        self.run_event = Event()
        self.exception_raised = 0
        self.open_count = 0
        self.thread_running = AtomicIntSafe()
        self.thread_running_ok = AtomicIntSafe()

        # Item per greenlet (integer, used as a key-range bound)
        item_per_greenlet = self.bench_item_count // g_count

        # Signal
        self.gorun_event = Event()

        # Alloc greenlet
        g_array = list()
        g_event = list()
        for _ in range(0, g_count):
            greenlet = Greenlet()
            g_array.append(greenlet)
            g_event.append(Event())

        # Run them
        cur_idx = 0
        for idx in range(0, len(g_array)):
            greenlet = g_array[idx]
            event = g_event[idx]
            greenlet.spawn(self._run_cache_bench, event, cur_idx, cur_idx + item_per_greenlet)
            cur_idx += item_per_greenlet
            SolBase.sleep(0)

        # Signal
        self.gorun_event.set()

        # Wait a bit
        dt = SolBase.mscurrent()
        while SolBase.msdiff(dt) < g_ms:
            SolBase.sleep(500)

            # Stat
            ms = SolBase.msdiff(dt)
            sec = float(ms / 1000.0)
            total_put = Meters.aig("mcs.cache_put")
            per_sec_put = round(float(total_put) / sec, 2)
            total_get = Meters.aig("mcs.cache_get_hit") + Meters.aig("mcs.cache_get_miss")
            per_sec_get = round(float(total_get) / sec, 2)

            logger.info(
                "Running..., count=%s, run=%s, ok=%s, put/sec=%s get/sec=%s, cache=%s",
                self.open_count,
                self.thread_running.get(), self.thread_running_ok.get(),
                per_sec_put, per_sec_get,
                self.mem_cache)
            self.assertEqual(self.exception_raised, 0)

        # Over, signal
        logger.info("Signaling, count=%s", self.open_count)
        self.run_event.set()

        # Wait
        for g in g_event:
            g.wait(30.0)
            self.assertTrue(g.isSet())

        g_event = None
        g_array = None

        # Log
        Meters.write_to_logger()
    finally:
        self.run_event.set()
        if g_event:
            for g in g_event:
                g.set()
        if g_array:
            for g in g_array:
                g.kill()
        if self.mem_cache:
            # Check tracked byte count against actual stored data, logging at most max_count items
            max_count = 0
            total_size = 0
            i = 0
            for (k, v) in self.mem_cache._hash_key.items():
                i += 1
                total_size += len(k) + len(v[1])
                if i < max_count:
                    logger.info("%s => %s", k, v)
            self.assertEqual(total_size, self.mem_cache._current_data_bytes.get())

            self.mem_cache.stop_cache()
            self.mem_cache = None
def test_tcp_svr_start_cli_connect_clidisco_clicanreco_deadlock(self):
    """
    Test
    """
    # Instance
    self.tcp_server = None

    try:
        # Start server
        self._start_server_and_check(deadlock=True)

        # Start client, check and stop client
        self._start_one_client_checkstop()

        # Check stats
        logger.info("TestLog : server : wait for timeout on stop calls")
        dt_start = SolBase.datecurrent()
        while SolBase.datediff(dt_start) < self.checkTimeOutMs:
            ok = True
            if Meters.aig("tcp.server.client_remove_timeout_internal") != 1:
                ok = False
            elif Meters.aig("tcp.server.client_remove_timeout_business") != 1:
                ok = False

            if ok:
                logger.info("TestLog : server : wait for timeout on stop calls : done")
                break
            else:
                SolBase.sleep(int(self.checkTimeOutMs / 100))

        # Check
        self.assertEqual(Meters.aig("tcp.server.client_remove_timeout_internal"), 1)
        self.assertEqual(Meters.aig("tcp.server.client_remove_timeout_business"), 1)

        # Start client, check and stop client
        self._start_one_client_checkstop()

        # Check stats
        logger.info("TestLog : server : wait for timeout on stop calls")
        dt_start = SolBase.datecurrent()
        while SolBase.datediff(dt_start) < self.checkTimeOutMs:
            ok = True
            if Meters.aig("tcp.server.client_remove_timeout_internal") != 2:
                ok = False
            elif Meters.aig("tcp.server.client_remove_timeout_business") != 2:
                ok = False

            if ok:
                logger.info("TestLog : server : wait for timeout on stop calls : done")
                break
            else:
                SolBase.sleep(int(self.checkTimeOutMs / 100))

        # Check
        self.assertEqual(Meters.aig("tcp.server.client_remove_timeout_internal"), 2)
        self.assertEqual(Meters.aig("tcp.server.client_remove_timeout_business"), 2)
    except Exception as e:
        logger.error("Exception in test, ex=%s", SolBase.extostr(e))
        raise
    finally:
        if self.tcp_server:
            self.tcp_server.stop_server()
        if self.tcpClient:
            self.tcpClient.disconnect()
@classmethod
def get_client_ko_count(cls, log_ko=False, target_client=1):
    """
    Test
    :param log_ko: If true, log ko enabled.
    :param target_client: Target client count.
    :return: An integer.
    """
    client_ko = 0

    # Client : connect
    if Meters.aig("ping.client.client_connect_count") != target_client:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_connect_count=%s",
                           Meters.aig("ping.client.client_connect_count"))
    if Meters.aig("ping.client.client_connect_error") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_connect_error=%s",
                           Meters.aig("ping.client.client_connect_error"))
    if Meters.aig("ping.client.client_disconnect_count") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_disconnect_count=%s",
                           Meters.aig("ping.client.client_disconnect_count"))

    # Client : hello
    if Meters.aig("ping.client.client_hello_sent") != target_client:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_hello_sent=%s",
                           Meters.aig("ping.client.client_hello_sent"))
    if Meters.aig("ping.client.client_hello_server_reply") != target_client:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_hello_server_reply=%s",
                           Meters.aig("ping.client.client_hello_server_reply"))
    if Meters.aig("ping.client.client_hello_server_timeout") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_hello_server_timeout=%s",
                           Meters.aig("ping.client.client_hello_server_timeout"))

    # Client : ping
    if Meters.aig("ping.client.client_ping_sent") == 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_ping_sent=%s",
                           Meters.aig("ping.client.client_ping_sent"))
    if Meters.aig("ping.client.client_ping_server_reply") == 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_ping_server_reply=%s",
                           Meters.aig("ping.client.client_ping_server_reply"))
    if Meters.aig("ping.client.client_ping_server_reply_noping_ongoing") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_ping_server_reply_noping_ongoing=%s",
                           Meters.aig("ping.client.client_ping_server_reply_noping_ongoing"))
    if Meters.aig("ping.client.client_pingserver_timeout") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_pingserver_timeout=%s",
                           Meters.aig("ping.client.client_pingserver_timeout"))

    # Client : send
    if Meters.aig("ping.client.client_send_error") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : client_send_error=%s",
                           Meters.aig("ping.client.client_send_error"))

    # Client : protocol
    if Meters.aig("ping.client.invalid_protocol") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : invalid_protocol=%s",
                           Meters.aig("ping.client.invalid_protocol"))

    # Client : Schedule
    if Meters.aig("ping.client.schedule_client_hello_server_timeout_error") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : schedule_client_hello_server_timeout_error=%s",
                           Meters.aig("ping.client.schedule_client_hello_server_timeout_error"))
    if Meters.aig("ping.client.schedule_client_ping_error") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : schedule_client_ping_error=%s",
                           Meters.aig("ping.client.schedule_client_ping_error"))
    if Meters.aig("ping.client.schedule_client_pingtimeouterror") > 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : schedule_client_pingtimeouterror=%s",
                           Meters.aig("ping.client.schedule_client_pingtimeouterror"))

    # Client : server
    if Meters.aig("ping.client.server_ping_receive") == 0:
        client_ko += 1
        if log_ko:
            logger.warning("failed : server_ping_receive=%s",
                           Meters.aig("ping.client.server_ping_receive"))
    if Meters.aig("ping.client.server_ping_reply") == 0:
        client_ko += 1
        if log_ko:
logger.warning("failed : server_ping_reply=%s", Meters.aig("ping.client.server_ping_reply")) # Exit return client_ko
@classmethod
def get_server_ko_count(cls, log_ko=False, target_client=1):
    """
    Test
    :param log_ko: If true, log ko enabled.
    :param target_client: Target client count.
    :return: An integer.
    """
    server_ko = 0

    # Stats : Schedule errors
    if Meters.aig("ping.server.scheduleClientHelloTimeOutError") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : scheduleClientHelloTimeOutError=%s",
                           Meters.aig("ping.server.scheduleClientHelloTimeOutError"))
    if Meters.aig("ping.server.scheduleServerPingError") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : scheduleServerPingError=%s",
                           Meters.aig("ping.server.scheduleServerPingError"))
    if Meters.aig("ping.server.scheduleServerPingTimeOutError") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : scheduleServerPingTimeOutError=%s",
                           Meters.aig("ping.server.scheduleServerPingTimeOutError"))

    # Stats : connect/disconnect requests count
    if Meters.aig("ping.server.serverStartCount") != target_client:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : serverStartCount=%s",
                           Meters.aig("ping.server.serverStartCount"))
    if Meters.aig("ping.server.serverStopSynchCount") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : serverStopSynchCount=%s",
                           Meters.aig("ping.server.serverStopSynchCount"))

    # Stats : start/send errors count
    if Meters.aig("ping.server.serverStartError") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : serverStartError=%s",
                           Meters.aig("ping.server.serverStartError"))
    if Meters.aig("ping.server.serverSendError") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : serverSendError=%s",
                           Meters.aig("ping.server.serverSendError"))

    # Stats : hello(s)
    if Meters.aig("ping.server.clientHelloReceived") == 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : clientHelloReceived=%s",
                           Meters.aig("ping.server.clientHelloReceived"))
    if Meters.aig("ping.server.client_hello_server_reply") == 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : client_hello_server_reply=%s",
                           Meters.aig("ping.server.client_hello_server_reply"))
    if Meters.aig("ping.server.clientHelloTimeOut") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : clientHelloTimeOut=%s",
                           Meters.aig("ping.server.clientHelloTimeOut"))

    # Stats : server pings
    if Meters.aig("ping.server.serverPingSent") == 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : serverPingSent=%s",
                           Meters.aig("ping.server.serverPingSent"))
    if Meters.aig("ping.server.serverPingClientReply") == 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : serverPingClientReply=%s",
                           Meters.aig("ping.server.serverPingClientReply"))
    if Meters.aig("ping.server.serverPingClientTimeOut") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : serverPingClientTimeOut=%s",
                           Meters.aig("ping.server.serverPingClientTimeOut"))
    if Meters.aig("ping.server.serverPingServerClientReplyNoPingOngoing") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : serverPingServerClientReplyNoPingOngoing=%s",
                           Meters.aig("ping.server.serverPingServerClientReplyNoPingOngoing"))

    # Stats : client pings
    if Meters.aig("ping.server.clientPingReceive") == 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : clientPingReceive=%s",
                           Meters.aig("ping.server.clientPingReceive"))
    if Meters.aig("ping.server.client_ping_server_reply") == 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : client_ping_server_reply=%s",
                           Meters.aig("ping.server.client_ping_server_reply"))

    # Stats : invalid protocols
    if Meters.aig("ping.server.invalid_protocol") > 0:
        server_ko += 1
        if log_ko:
            logger.warning("_getServerKoCount : failed : invalid_protocol=%s",
                           Meters.aig("ping.server.invalid_protocol"))

    # Exit
    return server_ko
def _start_multi_client_checkallping_stop(self):
    """
    Test
    """
    # Start
    self._start_multi_client(self.clientMaxCount)

    # Here we must wait
    # 1) Client : already connected, must
    # - send hello, have a reply
    # - send a ping, have a reply
    # - reply to one server ping
    # 2) Server
    # - receive a hello and reply
    # - send a ping, have a reply
    # - reply to one client ping
    # => We check using the static PingStatXXX
    flush_stat_inloop = False
    dt = SolBase.datecurrent()
    dt_stat = SolBase.datecurrent()
    dt_loop = SolBase.datecurrent()
    client_prev_completed_ping = 0
    server_prev_completed_ping = 0
    while SolBase.datediff(dt) < self.runTimeMs:
        # Wait
        SolBase.sleep(1000)

        # Client
        client_ko = PingTestTools.get_client_ko_count(False, self.clientMaxCount)

        # Server
        server_ko = PingTestTools.get_server_ko_count(False, self.clientMaxCount)

        # Total ping
        client_total_completed_ping = Meters.aig("ping.client.client_ping_server_reply")
        server_total_completed_ping = Meters.aig("ping.server.serverPingClientReply")

        # Current ping
        client_local_completed_ping = client_total_completed_ping - client_prev_completed_ping
        server_local_completed_ping = server_total_completed_ping - server_prev_completed_ping

        # Store prev
        client_prev_completed_ping = client_total_completed_ping
        server_prev_completed_ping = server_total_completed_ping

        # Elapsed ms
        elapsed_ms = SolBase.datediff(dt_loop)
        elapsed_total_ms = SolBase.datediff(dt)

        # Ping per sec.
        client_local_pps = (client_local_completed_ping / (elapsed_ms / 1000.0))
        server_local_pps = (server_local_completed_ping / (elapsed_ms / 1000.0))
        client_avg_pps = (client_total_completed_ping / (elapsed_total_ms / 1000.0))
        server_avg_pps = (server_total_completed_ping / (elapsed_total_ms / 1000.0))

        # Reset date
        dt_loop = SolBase.datecurrent()

        # Wait
        logger.info(
            "Running : ko=%s:%s, sec=%s/%s, cli.ping=%s, svr.ping=%s, exp.pps=%.2f, "
            "cli.pps=%.2f, svr.pps=%.2f, cli.aps=%.2f, svr.aps=%.2f",
            client_ko, server_ko,
            int(SolBase.datediff(dt) / 1000), int(self.runTimeMs / 1000),
            client_local_completed_ping, server_local_completed_ping,
            self.expectedPps,
            client_local_pps, server_local_pps,
            client_avg_pps, server_avg_pps)

        # Stat
        if flush_stat_inloop and SolBase.datediff(dt_stat) > self.statEveryMs:
            Meters.write_to_logger()
            dt_stat = SolBase.datecurrent()

    # Final check
    client_ko = PingTestTools.get_client_ko_count(True, self.clientMaxCount)
    server_ko = PingTestTools.get_server_ko_count(True, self.clientMaxCount)
    self.assertEqual(client_ko, 0)
    self.assertEqual(server_ko, 0)

    # Stop
    self._stop_multi_client()
def _go_greenlet(self, greenlet_count, pool_max, sql="SELECT user, host FROM mysql.user LIMIT 1;", check_exception=True):
    """
    Doc
    :param greenlet_count: greenlet_count
    :type greenlet_count: int
    :param pool_max: Pool max size
    :type pool_max: int
    :param sql: str
    :type sql: str
    :param check_exception: bool
    :type check_exception: bool
    """
    MysqlApi.reset_pools()
    Meters.reset()

    g_event = None
    g_array = None
    try:
        # Settings
        g_count = greenlet_count
        g_ms = 5000

        # Go
        self.pool_max = pool_max
        self.run_event = Event()
        self.exception_raised = 0
        self.pool_sql = sql
        self.thread_running = AtomicIntSafe()
        self.thread_running_ok = AtomicIntSafe()

        # Signal
        self.gorun_event = Event()

        # Alloc greenlet
        g_array = list()
        g_event = list()
        for _ in range(0, g_count):
            greenlet = Greenlet()
            g_array.append(greenlet)
            g_event.append(Event())

        # Run them
        for idx in range(0, len(g_array)):
            greenlet = g_array[idx]
            event = g_event[idx]
            greenlet.spawn(self._run_mysql_bench, event)
            SolBase.sleep(0)

        # Signal
        self.gorun_event.set()

        # Wait a bit
        dt = SolBase.mscurrent()
        while SolBase.msdiff(dt) < g_ms:
            SolBase.sleep(1000)

            # Stat
            ms = SolBase.msdiff(dt)
            sec = float(ms / 1000.0)
            total_acquire = Meters.aig("k.db_pool.base.call.connection_acquire")
            per_sec_acquire = round(float(total_acquire) / sec, 2)
            total_release = Meters.aig("k.db_pool.base.call.connection_release")
            per_sec_release = round(float(total_release) / sec, 2)

            logger.info("Running..., run=%s, ok=%s, ps.ack/rel=%s/%s",
                        self.thread_running.get(), self.thread_running_ok.get(),
                        per_sec_acquire, per_sec_release)
            if check_exception:
                self.assertEqual(self.exception_raised, 0)

        # Over, signal
        logger.info("Signaling")
        self.run_event.set()

        # Wait
        for g in g_event:
            g.wait(30.0)
            self.assertTrue(g.isSet())

        g_event = None
        g_array = None

        # Check it
        self.assertEqual(Meters.aig("k.db_pool.base.call.connection_acquire"),
                         Meters.aig("k.db_pool.base.call.connection_release"))
        if check_exception:
            self.assertEqual(Meters.aig("k.db_pool.base.call.connection_acquire"),
                             Meters.aig("k.db_pool.mysql.call._connection_ping"))
        self.assertLessEqual(Meters.aig("k.db_pool.mysql.call._get_connection"), pool_max)
        self.assertLessEqual(Meters.aig("k.db_pool.mysql.call._connection_create"), pool_max)
        self.assertLessEqual(Meters.aig("k.db_pool.hash.cur"), 1)
        self.assertLessEqual(Meters.aig("k.db_pool.base.cur_size"), pool_max)
        self.assertLessEqual(Meters.aig("k.db_pool.base.max_size"), pool_max)
        self.assertEqual(Meters.aig("k.db_pool.mysql.call.__init"), 1)
    finally:
        self.run_event.set()
        if g_event:
            for g in g_event:
                g.set()
        if g_array:
            for g in g_array:
                g.kill()