def _bench(self, per_sec_multi, key, method_to_call, *args):
    """
    Internal bench helper : repeatedly invoke method_to_call(*args) in batches of
    self.per_loop until self.max_ms elapses, then log the measured throughput.
    :param per_sec_multi: Multiply per_sec (case of lists)
    :type per_sec_multi: int
    :param key: For log
    :type key: str
    :param method_to_call: Method point
    :type callable
    :param args: args
    :type args: object
    """
    if isinstance(args[0], list):
        # noinspection PyArgumentList,PyTypeChecker
        logger.debug("Using list, len=%s", len(args[0]))

    # Counters
    call_count = 0
    loop_count = 0
    batch_size = self.per_loop

    # Run batches until the time budget is exhausted
    start_ms = SolBase.mscurrent()
    while SolBase.msdiff(start_ms) < self.max_ms:
        for _ in range(0, batch_size):
            method_to_call(*args)
        call_count += self.per_loop
        loop_count += 1

    # Apply the multiplier (list inputs count as several items per call)
    call_count *= per_sec_multi

    # Throughput (per_sec computed before rounding the inputs)
    elapsed_ms = SolBase.msdiff(start_ms)
    elapsed_sec = elapsed_ms / 1000.0
    per_sec = round(call_count / elapsed_sec, 2)
    elapsed_ms = round(elapsed_ms, 2)
    elapsed_sec = round(elapsed_sec, 2)
    logger.info("%32s, loop=%8s, i=%8s, ms=%8.2f, sec=%6.2f, per_sec=%12.2f",
                key, loop_count, call_count, elapsed_ms, elapsed_sec, per_sec)
def test_watchdog_schedule(self):
    """
    Test
    :return:
    """
    # Keep the watchdog rescheduling itself
    self.callback_return = True

    # Cache with a 500 ms watchdog
    self.mem_cache = MemoryCache(watchdog_interval_ms=500, cb_watchdog=self.watchdog_callback)

    # Populate (long TTLs : nothing must be evicted here)
    self.mem_cache.put("keyA", b"valA", 60000)
    self.mem_cache.put("keyD", b"valD", 60000)

    # Poll until the first watchdog callback (max 5 intervals)
    start_ms = SolBase.mscurrent()
    while SolBase.msdiff(start_ms) < (500.0 * 5.0):
        if self.callback_call >= 1:
            logger.info("Run 1 exit")
            break
        SolBase.sleep(1)
    logger.info("Run 1 done")
    self.assertEqual(self.callback_call, 1)
    self.assertEqual(self.callback_evicted, 0)

    # Poll until the second watchdog callback
    start_ms = SolBase.mscurrent()
    while SolBase.msdiff(start_ms) < (500.0 * 5.0):
        if self.callback_call >= 2:
            logger.info("Run 2 exit")
            break
        SolBase.sleep(1)
    logger.info("Run 2 done")
    self.assertGreaterEqual(self.callback_call, 2)
    self.assertEqual(self.callback_evicted, 0)

    # Stop the cache
    self.mem_cache.stop_cache()
    self.mem_cache = None

    # Let two intervals elapse
    SolBase.sleep(500 * 2)

    # No further callback must have fired after stop
    self.assertEqual(self.callback_call, 2)
    self.assertEqual(self.callback_evicted, 0)
def test_ms(self):
    """
    Check SolBase clock helpers : a sleep must be measured within tolerance.
    """
    # 100 ms sleep must land in [100, 200] ms
    ms = SolBase.mscurrent()
    SolBase.sleep(100)
    # Gevent 1.3 : this is buggy (may be related to https://github.com/gevent/gevent/issues/1227)
    self.assertGreaterEqual(SolBase.msdiff(ms), 100)
    self.assertLessEqual(SolBase.msdiff(ms), 200)

    # Same check via securrent (seconds), converted to a ms origin
    sec = SolBase.securrent()
    SolBase.sleep(1100)
    # Gevent 1.3 : this is buggy (may be related to https://github.com/gevent/gevent/issues/1227)
    self.assertGreaterEqual(SolBase.msdiff(sec * 1000), 1000)
    self.assertLessEqual(SolBase.msdiff(sec * 1000), 1200)
def handle(self, data, address):
    """
    Handle one udp message
    :param data: data
    :type data: str
    :param address: address
    :type address: str
    """
    start_ms = SolBase.mscurrent()
    try:
        # Payload processing (currently a no-op)
        pass

        # Success counter
        Meters.aii("resolvusclient_udp_recv")
    except Exception as e:
        # Log failure with full context
        logger.warning(
            "Handle failed, data_len=%s, address=%s, data=%s, ex=%s",
            len(data), repr(address), repr(data), SolBase.extostr(e))

        # Failure counter
        Meters.aii("resolvusclient_udp_recv_ex")
    finally:
        # Latency is always recorded
        Meters.dtci("resolvusclient_udp_recv_dtc", SolBase.msdiff(start_ms))
def get(self, key):
    """
    Get from cache.
    :param key: Any key
    :type key: str
    :return An obj or null if not in cache
    :rtype bytes, None
    """
    start_ms = SolBase.mscurrent()
    try:
        # Key type guard
        if not isinstance(key, (bytes, str)):
            raise Exception("Key must be (bytes, str)")

        # Lookup on the read replica
        v = self._read_redis.get(key)

        # Hit / miss accounting
        Meters.aii(self.meters_prefix + ("rcs.cache_get_hit" if v else "rcs.cache_get_miss"))
        return v
    except Exception as e:
        logger.warning("Exception, ex=%s", SolBase.extostr(e))
        Meters.aii(self.meters_prefix + "rcs.cache_ex")
        return None
    finally:
        # Read latency, always recorded
        Meters.dtci(self.meters_prefix + "rcs.cache_dtc_read", SolBase.msdiff(start_ms))
def _wait_process_exit(self, p):
    """
    Wait process exit
    :param p: multiprocessing.Process,Popen
    :type p: multiprocessing.Process,Popen
    """
    logger.info("Waiting process exit")
    start_ms = SolBase.mscurrent()
    while SolBase.msdiff(start_ms) < self.test_timeout_ms:
        if isinstance(p, Process):
            # multiprocessing child : poll liveness
            if not p.is_alive():
                logger.info("Waiting process exit ok")
                return
            SolBase.sleep(10)
        elif isinstance(p, subprocess.Popen):
            # Popen : poll for a zero return code (None == 0 is False, so one test suffices)
            rc = p.poll()
            if rc == 0:
                logger.info("Waiting process exit ok")
                return
            SolBase.sleep(10)
    raise Exception("Waiting process exit timeout")
def _daemon_stop(self):
    """
    Stop the Daemon

    # Status : OK, implemented
    # - Running : exit 0 => OK
    # - Not running and pid file exist : exit 1 => OK
    # - Not running : exit 3 => OK
    # - Other : 4 => NOT TESTED
    """
    logger.debug("entering")

    # Get the pid from the pidfile
    pid = self._get_running_pid()
    if not pid:
        # Nothing to stop
        logger.info("Daemon is not running, pidFile=%s", self._pidfile)
        return

    # Stop it
    logger.debug("sending SIGTERM, pid=%s, pidFile=%s", pid, self._pidfile)
    try:
        os.kill(pid, SIGTERM)
    except OSError as ex:
        if ex.errno == errno.ESRCH:
            # Process already gone : tolerated
            logger.info("SIGTERM failed, ESRCH, ex=%s", SolBase.extostr(ex))
        else:
            # Other OS failure : logged, not fatal
            logger.info("SIGTERM failed, not an ESRCH, ex=%s", SolBase.extostr(ex))
    except Exception as ex:
        # Unexpected failure : hard exit
        logger.info("SIGTERM failed, not an OSError, going exit(1), ex=%s", SolBase.extostr(ex))
        sys.exit(1)
    finally:
        # Pid file is removed whatever the kill outcome
        if os.path.exists(self._pidfile):
            logger.debug("Removing pidFile=%s", self._pidfile)
            self._remove_pid_file()

    # Ok
    logger.debug("SIGTERM sent")
    ms_start = SolBase.mscurrent()

    # Validate : poll /proc/<pid> until the process disappears or timeout (Linux-specific)
    proc_target = "/proc/%d" % pid
    while SolBase.msdiff(ms_start) < self._timeout_ms:
        if os.path.exists(proc_target):
            SolBase.sleep(10)
            continue

        # Over (second _remove_pid_file call : presumably tolerant to an already-removed file)
        logger.info("SIGTERM success, pid=%s", pid)
        self._remove_pid_file()
        return

    # Not cool : process still alive after timeout
    logger.warning("SIGTERM timeout=%s ms, pid=%s", self._timeout_ms, pid)
def test_basic_eviction_with_watchdog_ttl(self): """ Test :return: """ # Go self.mem_cache = MemoryCache(watchdog_interval_ms=1000, cb_watchdog=self.watchdog_callback, cb_evict=self.eviction_callback) # Put self.mem_cache.put("keyA", b"valA", 60000) self.mem_cache.put("keyB", b"valB", 500) self.mem_cache.put("keyC", b"valC", 500) self.mem_cache.put("keyD", b"valD", 60000) logger.info("ms cur=%s", SolBase.mscurrent()) logger.info("A : %s", self.mem_cache.get_raw("keyA")) logger.info("B : %s", self.mem_cache.get_raw("keyB")) logger.info("C : %s", self.mem_cache.get_raw("keyC")) logger.info("D : %s", self.mem_cache.get_raw("keyD")) # Wait a bit ms_start = SolBase.mscurrent() while SolBase.msdiff(ms_start) < (1000.0 * 2.0): if self.callback_call > 0: break else: SolBase.sleep(10) logger.info("ms after wait=%s", SolBase.mscurrent()) logger.info("_hash_key = %s", self.mem_cache._hash_key) logger.info("_hash_context = %s", self.mem_cache._hash_context) # A : must be present # B : must be evicted (TTL elapsed, by watchdog) self.assertEqual(self.callback_call, 1) self.assertEqual(self.callback_evicted, 2) self.assertFalse("valB" in self.mem_cache._hash_key) self.assertFalse("valB" in self.mem_cache._hash_context) self.assertFalse("valC" in self.mem_cache._hash_key) self.assertFalse("valC" in self.mem_cache._hash_context) self.assertIsNone(self.mem_cache.get("keyB")) self.assertIsNone(self.mem_cache.get("keyC")) self.assertEqual(self.mem_cache.get("keyA"), b"valA") self.assertEqual(self.mem_cache.get("keyD"), b"valD") self.assertEqual(self.evict_count, 2) self.assertTrue(self.evict_last_key == "keyB" or self.evict_last_key == "keyC") self.assertTrue(self.evict_last_value == b"valB" or self.evict_last_value == b"valC") self.assertEqual(Meters.aig("mcs.cache_evict_ttl_watchdog"), 2) # Stop self.mem_cache.stop_cache() self.mem_cache = None
def _run_filter(self, ip_addr):
    """
    Push contextual values then emit logs on two loggers for ~2 seconds.
    :param ip_addr: ip address injected in the logging context and the log lines
    """
    extra_logger = logging.getLogger("new_logger")

    # Contextual values (picked up by the log filter under test)
    SolBase.context_set("k_ip", ip_addr)
    SolBase.context_set("z_value", ip_addr)
    SolBase.context_set("zz_uc", u"B\u001BB")

    # Emit a log on both loggers for 2000 ms, yielding between iterations
    start_ms = SolBase.mscurrent()
    while SolBase.msdiff(start_ms) < 2000.0:
        logger.info("TEST LOG ip_addr=%s", ip_addr)
        extra_logger.info("TEST LOG ip_addr=%s", ip_addr)
        SolBase.sleep(0)
def _watchdog_run(self):
    """
    Watch dog : refresh current gauges, evict expired keys, then reschedule itself.
    :return Nothing
    """
    # Cache stopped : neither run nor reschedule
    if not self._is_started:
        return

    reschedule = True
    try:
        # Current meters (gauges)
        Meters.ai(self.meters_prefix + "mcs.cur_bytes").set(
            self._current_data_bytes.get())
        Meters.ai(self.meters_prefix + "mcs.cur_size_hash").set(
            len(self._hash_key))

        # Evict
        ms = SolBase.mscurrent()
        evicted_count = self._evict_all_expired_keys()

        # Check (evict can take some time) : cache may have been stopped meanwhile
        if not self._is_started:
            return
        Meters.dtci(self.meters_prefix + "mcs.cache_dtc_watchdog", SolBase.msdiff(ms))

        # Stat
        if evicted_count > 0:
            Meters.aii(self.meters_prefix + "mcs.cache_evict_ttl_watchdog", evicted_count)

        # Callback (unittest) : its return value drives rescheduling
        if self._cb_watchdog:
            reschedule = self._cb_watchdog(evicted_count)
    except Exception as e:
        if self._is_started:
            # Still running : real error
            logger.error("_watchdog_run : Exception, id=%s, e=%s", id(self), SolBase.extostr(e))
            Meters.aii(self.meters_prefix + "mcs.cache_ex")
        else:
            # Stopping : expected noise, log low and do not reschedule
            logger.debug("_watchdog_run : Exception, id=%s, e=%s", id(self), SolBase.extostr(e))
            reschedule = False
    finally:
        # Run counter, incremented even on the early returns inside try
        Meters.aii(self.meters_prefix + "mcs.cache_watchdog_run_count")

    # Schedule next write
    if reschedule and self._is_started:
        self._schedule_next_watchdog()
def remove(self, key):
    """
    Remove a key from cache.
    :param key: Any key
    :type key: str
    """
    start_ms = SolBase.mscurrent()
    try:
        # Key type guard
        if not isinstance(key, (bytes, str)):
            raise Exception("Key must be (bytes, str)")

        # Delete on the write master
        self._write_redis.delete(key)
    except Exception as e:
        logger.warning("Exception, ex=%s", SolBase.extostr(e))
        Meters.aii(self.meters_prefix + "rcs.cache_ex")
    finally:
        # Write latency, always recorded
        Meters.dtci(self.meters_prefix + "rcs.cache_dtc_write", SolBase.msdiff(start_ms))
def _get_std_err(self):
    """
    Get
    :return: list
    :rtype: list
    """
    # Flush stderr (may raise ValueError on a closed stream)
    try:
        sys.stderr.flush()
    except ValueError:
        pass

    # Poll the file until it has content or the timeout elapses
    start_ms = SolBase.mscurrent()
    while True:
        lines = self._file_to_list(self.daemon_std_err)
        if lines:
            return lines
        if SolBase.msdiff(start_ms) > self.std_err_timeout_ms:
            return []
        SolBase.sleep(10)
def _lifecycle_log_status(self):
    """
    Run
    """
    try:
        with self._lifecycle_locker:
            # Throttle : bail out while the interval has not elapsed
            if SolBase.msdiff(self._lifecycle_last_log_ms) < self._lifecycle_interval_ms:
                return

            # Log now
            self._lifecycle_last_log_ms = SolBase.mscurrent()

            # noinspection PyProtectedMember
            lifecyclelogger.info(
                "self=%s",
                # Id
                id(self),
            )
    except Exception as e:
        logger.warning("Exception, ex=%s", SolBase.extostr(e))
def _get_std_out_file(self, file_name):
    """
    Get
    :param file_name: str
    :type file_name: str
    :return: list
    :rtype: list
    """
    # Flush stdout (may raise ValueError on a closed stream)
    try:
        sys.stdout.flush()
    except ValueError:
        pass

    # Poll the file until it has content or the timeout elapses
    start_ms = SolBase.mscurrent()
    while True:
        lines = self._file_to_list(file_name)
        if lines:
            return lines
        if SolBase.msdiff(start_ms) > self.stdout_timeout_ms:
            return []
        SolBase.sleep(10)
def go_http(self, http_request):
    """
    Perform an http request
    :param http_request: HttpRequest
    :type http_request: HttpRequest
    :return HttpResponse
    :rtype HttpResponse
    """
    start_ms = SolBase.mscurrent()
    http_response = HttpResponse()

    # Bind request to response
    http_response.http_request = http_request

    # Overall budget, in seconds
    general_timeout_sec = float(http_request.general_timeout_ms) / 1000.0
    try:
        # Fire, bounded by the overall timeout
        gevent.with_timeout(
            general_timeout_sec,
            self._go_http_internal,
            http_request, http_response)
        SolBase.sleep(0)
    except Timeout:
        # Overall timeout reached
        http_response.exception = Exception("Timeout while processing, general_timeout_sec={0}".format(general_timeout_sec))
    except Exception as e:
        # Any other failure is attached to the response
        http_response.exception = e
    finally:
        # Switch
        SolBase.sleep(0)

        # Elapsed, always assigned
        http_response.elapsed_ms = SolBase.msdiff(start_ms)

    return http_response
def _go_greenlet(self, greenlet_count, put_count, get_count, bench_item_count):
    """
    Doc : concurrent redis-cache bench — spawn greenlets hammering the cache for 10 s,
    logging throughput every 500 ms, then signal stop and verify clean shutdown.
    :param greenlet_count: greenlet_count
    :param put_count: put_count
    :param get_count: get_count
    :param bench_item_count: bench_item_count
    """
    g_event = None
    g_array = None
    try:
        # Settings
        g_count = greenlet_count
        g_ms = 10000

        # Continue callback loop
        self.callback_return = True

        # Go
        self.redis_cache = RedisCache()

        # Item count
        self.bench_item_count = bench_item_count
        self.bench_put_weight = put_count
        self.bench_get_weight = get_count
        self.bench_ttl_min_ms = 1000
        self.bench_ttl_max_ms = int(g_ms / 2)

        # Go
        self.run_event = Event()
        self.exception_raised = 0
        self.open_count = 0
        self.thread_running = AtomicIntSafe()
        self.thread_running_ok = AtomicIntSafe()

        # Item per greenlet
        # NOTE(review): true division yields a float under Python 3, so the cur_idx
        # bounds passed below become floats — presumably _run_cache_bench tolerates
        # this; verify ("//" may be intended)
        item_per_greenlet = self.bench_item_count / g_count

        # Signal
        self.gorun_event = Event()

        # Alloc greenlet
        g_array = list()
        g_event = list()
        for _ in range(0, g_count):
            greenlet = Greenlet()
            g_array.append(greenlet)
            g_event.append(Event())

        # Run them : each greenlet gets its own completion event and item range
        cur_idx = 0
        for idx in range(0, len(g_array)):
            greenlet = g_array[idx]
            event = g_event[idx]
            greenlet.spawn(self._run_cache_bench, event, cur_idx, cur_idx + item_per_greenlet)
            cur_idx += item_per_greenlet
            SolBase.sleep(0)

        # Signal : release all greenlets at once
        self.gorun_event.set()

        # Wait a bit : poll and log stats every 500 ms for g_ms total
        dt = SolBase.mscurrent()
        while SolBase.msdiff(dt) < g_ms:
            SolBase.sleep(500)

            # Stat
            ms = SolBase.msdiff(dt)
            sec = float(ms / 1000.0)
            total_put = Meters.aig("rcs.cache_put")
            per_sec_put = round(float(total_put) / sec, 2)
            total_get = Meters.aig("rcs.cache_get_hit") + Meters.aig(
                "rcs.cache_get_miss")
            per_sec_get = round(float(total_get) / sec, 2)
            logger.info(
                "Running..., count=%s, run=%s, ok=%s, put/sec=%s get/sec=%s, cache=%s",
                self.open_count,
                self.thread_running.get(),
                self.thread_running_ok.get(),
                per_sec_put, per_sec_get,
                self.redis_cache)
            self.assertEqual(self.exception_raised, 0)

        # Over, signal : ask all greenlets to stop
        logger.info("Signaling, count=%s", self.open_count)
        self.run_event.set()

        # Wait : each greenlet must flag its completion event within 30 s
        for g in g_event:
            g.wait(30.0)
            self.assertTrue(g.isSet())

        g_event = None
        g_array = None

        # Log
        Meters.write_to_logger()
    finally:
        # Best-effort cleanup : unblock, flag and kill anything still pending
        self.run_event.set()
        if g_event:
            for g in g_event:
                g.set()
        if g_array:
            for g in g_array:
                g.kill()
        if self.redis_cache:
            self.redis_cache.stop_cache()
            self.redis_cache = None
def _go_gevent(self, http_request, http_response):
    """
    Perform an http request
    :param http_request: HttpRequest
    :type http_request: HttpRequest
    :param http_response: HttpResponse
    :type http_response: HttpResponse
    """
    # Implementation
    http_response.http_implementation = HttpClient.HTTP_IMPL_GEVENT

    # Uri
    url = URL(http_request.uri)
    SolBase.sleep(0)

    # Patch for path attribute error
    try:
        _ = url.path
    except AttributeError:
        url.path = "/"

    # Get instance
    logger.debug("Get pool")
    http = self.gevent_from_pool(url, http_request)
    logger.debug("Get pool done, pool=%s", http)
    SolBase.sleep(0)

    # Fire
    ms_start = SolBase.mscurrent()
    logger.debug("Http now")
    if not http_request.method:
        # ----------------
        # Auto-detect : POST when a body is present, GET otherwise
        # ----------------
        if http_request.post_data:
            # Post
            response = http.post(url.request_uri, body=http_request.post_data, headers=http_request.headers)
        else:
            # Get
            response = http.get(url.request_uri, headers=http_request.headers)
    else:
        # ----------------
        # Use input : explicit method dispatch
        # ----------------
        if http_request.method == "GET":
            response = http.get(url.request_uri, headers=http_request.headers)
        elif http_request.method == "DELETE":
            response = http.delete(url.request_uri, body=http_request.post_data, headers=http_request.headers)
        elif http_request.method == "HEAD":
            response = http.head(url.request_uri, headers=http_request.headers)
        elif http_request.method == "PUT":
            response = http.put(url.request_uri, body=http_request.post_data, headers=http_request.headers)
        elif http_request.method == "POST":
            response = http.post(url.request_uri, body=http_request.post_data, headers=http_request.headers)
        elif http_request.method == "PATCH":
            # Not supported by this backend
            raise Exception("Unsupported gevent method={0}".format(http_request.method))
        elif http_request.method == "OPTIONS":
            raise Exception("Unsupported gevent method={0}".format(http_request.method))
        elif http_request.method == "TRACE":
            raise Exception("Unsupported gevent method={0}".format(http_request.method))
        else:
            raise Exception("Invalid gevent method={0}".format(http_request.method))
    logger.debug("Http done, ms=%s", SolBase.msdiff(ms_start))
    SolBase.sleep(0)

    # Check
    if not response:
        raise Exception("No response from http")

    # Process it
    http_response.status_code = response.status_code

    # Read
    ms_start = SolBase.mscurrent()
    logger.debug("Read now")
    http_response.buffer = response.read()
    SolBase.sleep(0)
    logger.debug("Read done, ms=%s", SolBase.msdiff(ms_start))
    if response.content_length:
        http_response.content_length = response.content_length
    else:
        # No header : derive the length from the buffer actually read
        if http_response.buffer:
            http_response.content_length = len(http_response.buffer)
        else:
            http_response.content_length = 0

    # Copy response headers out of the backend's internal index
    # noinspection PyProtectedMember
    for k, v in response._headers_index.items():
        HttpClient._add_header(http_response.headers, k, v)
    response.should_close()

    # Over
    SolBase.sleep(0)
def _go_greenlet(self, greenlet_count, pool_max, sql="SELECT user, host FROM mysql.user LIMIT 1;", check_exception=True):
    """
    Doc : concurrent mysql-pool bench — spawn greenlets running sql for 5 s,
    logging acquire/release throughput every second, then stop and verify pool meters.
    :param greenlet_count: greenlet_count
    :type greenlet_count: int
    :param pool_max: Pool max size
    :type pool_max: int
    :param sql: str
    :type sql: str
    :param check_exception: bool
    :type check_exception: bool
    """
    MysqlApi.reset_pools()
    Meters.reset()
    g_event = None
    g_array = None
    try:
        # Settings
        g_count = greenlet_count
        g_ms = 5000

        # Go
        self.pool_max = pool_max
        self.run_event = Event()
        self.exception_raised = 0
        self.pool_sql = sql
        self.thread_running = AtomicIntSafe()
        self.thread_running_ok = AtomicIntSafe()

        # Signal
        self.gorun_event = Event()

        # Alloc greenlet : one completion event per greenlet
        g_array = list()
        g_event = list()
        for _ in range(0, g_count):
            greenlet = Greenlet()
            g_array.append(greenlet)
            g_event.append(Event())

        # Run them
        for idx in range(0, len(g_array)):
            greenlet = g_array[idx]
            event = g_event[idx]
            greenlet.spawn(self._run_mysql_bench, event)
            SolBase.sleep(0)

        # Signal : release all greenlets at once
        self.gorun_event.set()

        # Wait a bit : poll and log stats every second for g_ms total
        dt = SolBase.mscurrent()
        while SolBase.msdiff(dt) < g_ms:
            SolBase.sleep(1000)

            # Stat
            ms = SolBase.msdiff(dt)
            sec = float(ms / 1000.0)
            total_acquire = Meters.aig(
                "k.db_pool.base.call.connection_acquire")
            per_sec_acquire = round(float(total_acquire) / sec, 2)
            total_release = Meters.aig(
                "k.db_pool.base.call.connection_release")
            per_sec_release = round(float(total_release) / sec, 2)
            logger.info("Running..., run=%s, ok=%s, ps.ack/rel=%s/%s",
                        self.thread_running.get(),
                        self.thread_running_ok.get(),
                        per_sec_acquire, per_sec_release)
            if check_exception:
                self.assertEqual(self.exception_raised, 0)

        # Over, signal : ask all greenlets to stop
        logger.info("Signaling")
        self.run_event.set()

        # Wait : each greenlet must flag its completion event within 30 s
        for g in g_event:
            g.wait(30.0)
            self.assertTrue(g.isSet())

        g_event = None
        g_array = None

        # Check it : every acquire must have been released
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12) => assertEqual
        self.assertEqual(
            Meters.aig("k.db_pool.base.call.connection_acquire"),
            Meters.aig("k.db_pool.base.call.connection_release"))
        if check_exception:
            self.assertEqual(
                Meters.aig("k.db_pool.base.call.connection_acquire"),
                Meters.aig("k.db_pool.mysql.call._connection_ping"))
        self.assertLessEqual(
            Meters.aig("k.db_pool.mysql.call._get_connection"), pool_max)
        self.assertLessEqual(
            Meters.aig("k.db_pool.mysql.call._connection_create"), pool_max)
        self.assertLessEqual(Meters.aig("k.db_pool.hash.cur"), 1)
        self.assertLessEqual(Meters.aig("k.db_pool.base.cur_size"), pool_max)
        self.assertLessEqual(Meters.aig("k.db_pool.base.max_size"), pool_max)
        self.assertEqual(Meters.aig("k.db_pool.mysql.call.__init"), 1)
    finally:
        # Best-effort cleanup : unblock, flag and kill anything still pending
        self.run_event.set()
        if g_event:
            for g in g_event:
                g.set()
        if g_array:
            for g in g_array:
                g.kill()
def test_start_status_reload_stop_logfile(self):
    """
    Test : daemon lifecycle with -logfile — start, status x10, reload x10, stop,
    then verify captured logs and the daemon's last-action file.
    """
    try:
        # Start
        self._reset_std_capture()
        main_helper_file = self.current_dir + "CustomDaemon.py"
        main_helper_file = abspath(main_helper_file)
        self.assertTrue(FileUtility.is_file_exist(main_helper_file))

        # Params
        ar = list()
        ar.append(sys.executable)
        ar.append(main_helper_file)
        ar.append("-pidfile={0}".format(self.daemon_pid_file))
        ar.append("-stderr={0}".format(self.daemon_std_err))
        ar.append("-stdout=/dev/null")
        ar.append("-logfile={0}".format(self.daemon_std_out))
        ar.append("start")

        # =========================
        # START
        # =========================

        # Launch
        logger.info("Start : %s", " ".join(ar))
        p = subprocess.Popen(args=ar)
        logger.info("Started")
        SolBase.sleep(0)
        self._wait_process(p)

        # Try wait for stdout
        # FIX: join captured lines with a newline (was "n".join, a literal letter "n")
        ms_start = SolBase.mscurrent()
        while SolBase.msdiff(ms_start) < self.stdout_timeout_ms:
            if "\n".join(self._get_std_out()).find(
                    " INFO | CustomDaemon@_on_start") >= 0:
                break
            else:
                SolBase.sleep(10)

        # Get std (caution, we are async since forked)
        logger.info("stdOut ### START")
        for s in self._get_std_out():
            logger.info("stdOut => %s", s)
        logger.info("stdOut ### END")
        logger.info("stdErr ### START")
        for s in self._get_std_err():
            logger.info("stdErr => %s", s)
        logger.info("stdErr ### END")

        # Check
        self.assertTrue(len(self._get_std_err()) == 0)
        self.assertTrue(len(self._get_std_out()) > 0)
        self.assertTrue("\n".join(self._get_std_out()).find(" ERROR ") < 0)
        self.assertTrue("\n".join(self._get_std_out()).find(
            " INFO | CustomDaemon@_on_start") >= 0)
        self.assertTrue("\n".join(self._get_std_out()).find(" WARN ") < 0)

        # =========================
        # STATUS
        # =========================
        for _ in range(0, 10):
            # Args
            ar = list()
            ar.append(sys.executable)
            ar.append(main_helper_file)
            ar.append("-pidfile={0}".format(self.daemon_pid_file))
            ar.append("status")

            # Launch
            p = subprocess.Popen(args=ar)
            self._wait_process(p)

        # =========================
        # RELOAD
        # =========================
        for _ in range(0, 10):
            # Args
            ar = list()
            ar.append(sys.executable)
            ar.append(main_helper_file)
            ar.append("-pidfile={0}".format(self.daemon_pid_file))
            ar.append("reload")

            # Launch
            p = subprocess.Popen(args=ar)
            self._wait_process(p)

        # =========================
        # STOP
        # =========================

        # Args
        ar = list()
        ar.append(sys.executable)
        ar.append(main_helper_file)
        ar.append("-pidfile={0}".format(self.daemon_pid_file))
        ar.append("stop")

        # Launch
        p = subprocess.Popen(args=ar)
        self._wait_process(p)

        # =========================
        # OVER, CHECK LOGS
        # =========================

        # Try wait for stdout
        ms_start = SolBase.mscurrent()
        while SolBase.msdiff(ms_start) < self.stdout_timeout_ms:
            if "\n".join(self._get_std_out()).find(" INFO | CustomDaemon@_on_stop") >= 0 \
                    and "\n".join(self._get_std_out()).find(" INFO | CustomDaemon@_on_status") >= 0:
                break
            else:
                SolBase.sleep(10)

        # Get std (caution, we are async since forked)
        logger.info("stdOut ### START")
        for s in self._get_std_out():
            logger.info("stdOut => %s", s)
        logger.info("stdOut ### END")
        logger.info("stdErr ### START")
        for s in self._get_std_err():
            logger.info("stdErr => %s", s)
        logger.info("stdErr ### END")

        # Check
        self.assertTrue(len(self._get_std_err()) == 0)
        self.assertTrue(len(self._get_std_out()) > 0)
        self.assertTrue("\n".join(self._get_std_out()).find(" ERROR ") < 0)
        self.assertTrue("\n".join(self._get_std_out()).find(
            " INFO | CustomDaemon@_on_start") >= 0)
        self.assertTrue("\n".join(self._get_std_out()).find(
            " INFO | CustomDaemon@_on_stop") >= 0)
        self.assertTrue("\n".join(self._get_std_out()).find(
            " INFO | CustomDaemon@_on_status") >= 0)
        self.assertTrue("\n".join(self._get_std_out()).find(" WARN ") < 0)

        # =========================
        # OVER, CHECK ACTION FILE
        # =========================
        buf = FileUtility.file_to_textbuffer(
            CustomDaemon.DAEMON_LAST_ACTION_FILE, "ascii")
        self.assertTrue(buf.find("is_running=False") >= 0)
        self.assertTrue(buf.find("start_count=1") >= 0)
        self.assertTrue(buf.find("stop_count=1") >= 0)
        self.assertTrue(buf.find("status_count=10") >= 0)
        self.assertTrue(buf.find("reload_count=10") >= 0)
        self.assertTrue(buf.find("last_action=stop") >= 0)
    finally:
        logger.debug("Exiting test, idx=%s", self.run_idx)
def _go_greenlet(self, greenlet_count, put_count, get_count, bench_item_count, watchdog_interval_ms=60000, max_item=128000, max_bytes=32 * 1024 * 1024, max_single_item_bytes=1 * 1024 * 1024, purge_min_bytes=8 * 1024 * 1024, purge_min_count=1000):
    """
    Doc : concurrent memory-cache bench — spawn greenlets hammering the cache for 10 s,
    logging throughput every 500 ms, then verify the cache's byte accounting on teardown.
    :param greenlet_count: greenlet_count
    :param put_count: put_count
    :param get_count: get_count
    :param bench_item_count: bench_item_count
    :param watchdog_interval_ms: watchdog_interval_ms
    :param max_item: max_item
    :param max_bytes: max_bytes
    :param max_single_item_bytes: max_single_item_bytes
    :param purge_min_bytes: purge_min_bytes
    :param purge_min_count: purge_min_count
    """
    g_event = None
    g_array = None
    try:
        # Settings
        g_count = greenlet_count
        g_ms = 10000

        # Continue callback loop
        self.callback_return = True

        # Go
        self.mem_cache = MemoryCache(
            watchdog_interval_ms=watchdog_interval_ms,
            max_item=max_item,
            max_bytes=max_bytes,
            max_single_item_bytes=max_single_item_bytes,
            purge_min_bytes=purge_min_bytes,
            purge_min_count=purge_min_count)

        # Item count
        self.bench_item_count = bench_item_count
        self.bench_put_weight = put_count
        self.bench_get_weight = get_count
        self.bench_ttl_min_ms = 1000
        self.bench_ttl_max_ms = int(g_ms / 2)

        # Go
        self.run_event = Event()
        self.exception_raised = 0
        self.open_count = 0
        self.thread_running = AtomicIntSafe()
        self.thread_running_ok = AtomicIntSafe()

        # Item per greenlet
        # NOTE(review): true division yields a float under Python 3, so the cur_idx
        # bounds passed below become floats — presumably _run_cache_bench tolerates
        # this; verify ("//" may be intended)
        item_per_greenlet = self.bench_item_count / g_count

        # Signal
        self.gorun_event = Event()

        # Alloc greenlet
        g_array = list()
        g_event = list()
        for _ in range(0, g_count):
            greenlet = Greenlet()
            g_array.append(greenlet)
            g_event.append(Event())

        # Run them : each greenlet gets its own completion event and item range
        cur_idx = 0
        for idx in range(0, len(g_array)):
            greenlet = g_array[idx]
            event = g_event[idx]
            greenlet.spawn(self._run_cache_bench, event, cur_idx, cur_idx + item_per_greenlet)
            cur_idx += item_per_greenlet
            SolBase.sleep(0)

        # Signal : release all greenlets at once
        self.gorun_event.set()

        # Wait a bit : poll and log stats every 500 ms for g_ms total
        dt = SolBase.mscurrent()
        while SolBase.msdiff(dt) < g_ms:
            SolBase.sleep(500)

            # Stat
            ms = SolBase.msdiff(dt)
            sec = float(ms / 1000.0)
            total_put = Meters.aig("mcs.cache_put")
            per_sec_put = round(float(total_put) / sec, 2)
            total_get = Meters.aig("mcs.cache_get_hit") + Meters.aig(
                "mcs.cache_get_miss")
            per_sec_get = round(float(total_get) / sec, 2)
            logger.info(
                "Running..., count=%s, run=%s, ok=%s, put/sec=%s get/sec=%s, cache=%s",
                self.open_count,
                self.thread_running.get(),
                self.thread_running_ok.get(),
                per_sec_put, per_sec_get,
                self.mem_cache)
            self.assertEqual(self.exception_raised, 0)

        # Over, signal : ask all greenlets to stop
        logger.info("Signaling, count=%s", self.open_count)
        self.run_event.set()

        # Wait : each greenlet must flag its completion event within 30 s
        for g in g_event:
            g.wait(30.0)
            self.assertTrue(g.isSet())

        g_event = None
        g_array = None

        # Log
        Meters.write_to_logger()
    finally:
        # Best-effort cleanup : unblock, flag and kill anything still pending
        self.run_event.set()
        if g_event:
            for g in g_event:
                g.set()
        if g_array:
            for g in g_array:
                g.kill()
        if self.mem_cache:
            # Recompute total payload size from the hash and compare with the
            # cache's internal byte counter
            max_count = 0
            total_size = 0
            i = 0
            for (k, v) in self.mem_cache._hash_key.items():
                i += 1
                total_size += len(k) + len(v[1])
                # NOTE(review): max_count is 0, so this debug dump never fires —
                # looks like a manual toggle left at "off"
                if i < max_count:
                    logger.info("%s => %s", k, v)
            self.assertEqual(total_size, self.mem_cache._current_data_bytes.get())
            self.mem_cache.stop_cache()
            self.mem_cache = None