def handle(self, data, address):
    """
    Handle one udp message
    :param data: data
    :type data: str
    :param address: address
    :type address: str
    """
    start_ms = SolBase.mscurrent()
    try:
        # Process the incoming datagram (placeholder : nothing to do yet)
        pass
        # Count successful receive
        Meters.aii("resolvusclient_udp_recv")
    except Exception as e:
        # Trace the failure with full payload context
        logger.warning(
            "Handle failed, data_len=%s, address=%s, data=%s, ex=%s",
            len(data), repr(address), repr(data), SolBase.extostr(e))
        # Count the failure
        Meters.aii("resolvusclient_udp_recv_ex")
    finally:
        # Latency is recorded whatever the outcome
        Meters.dtci("resolvusclient_udp_recv_dtc", SolBase.msdiff(start_ms))
def connect(self):
    """
    Connect to server.
    :return Return true upon success.
    """
    # Count connection attempts
    Meters.aii("ping.client.client_connect_count")
    # Delegate to the underlying tcp client, timing the call
    started_at = SolBase.datecurrent()
    if not TcpSimpleClient.connect(self):
        # Fatal : count the error and give up
        logger.error("PingSimpleClient : connect failed, fatal, exiting")
        Meters.aii("ping.client.client_connect_error")
        return False
    # Record connection delay
    Meters.dtci("ping.client.delay_client_connect", SolBase.datediff(started_at))
    # Record SSL handshake delay when one occurred
    handshake_ms = self._get_ssl_handshake_ms()
    if handshake_ms:
        Meters.dtci("ping.client.delay_client_sslhandshake", handshake_ms)
    # Kick off the protocol with a hello
    self._protocol_client_hello_send()
    return True
def get(self, key):
    """
    Get from cache.
    :param key: Any key
    :type key: str
    :return An obj or null if not in cache
    :rtype bytes, None
    """
    read_start_ms = SolBase.mscurrent()
    try:
        if not isinstance(key, (bytes, str)):
            raise Exception("Key must be (bytes, str)")
        # Reads go through the read redis
        cached_value = self._read_redis.get(key)
        # Hit / miss accounting (empty value counts as a miss)
        Meters.aii(self.meters_prefix + ("rcs.cache_get_hit" if cached_value else "rcs.cache_get_miss"))
        return cached_value
    except Exception as e:
        logger.warning("Exception, ex=%s", SolBase.extostr(e))
        Meters.aii(self.meters_prefix + "rcs.cache_ex")
        return None
    finally:
        # Read latency is recorded whatever the outcome
        Meters.dtci(self.meters_prefix + "rcs.cache_dtc_read", SolBase.msdiff(read_start_ms))
def _protocol_client_ping_server_reply(self):
    """
    A client ping has been replied
    """
    with self._protocol_lock:
        if self._ping_timeout_greenlet is not None:
            # A ping is in flight : cancel its timeout, then arm the next ping
            self._unschedule_client_pingservertimeout()
            self._schedule_clientping()
            # Count the reply and its round-trip delay
            Meters.aii("ping.client.client_ping_server_reply")
            Meters.dtci("ping.client.delay_client_ping_toserver_reply", SolBase.datediff(self.dt_last_ping_send))
        else:
            # Reply received with no ping in flight
            Meters.aii("ping.client.client_ping_server_reply_noping_ongoing")
def _protocol_client_hello_server_reply(self, item):
    """
    Received a hello reply
    :param item: Received server reply ("C.HI.REPLY PID=...")
    """
    with self._protocol_lock:
        # Extract the server pid from the reply payload
        self._server_pid = item.replace("C.HI.REPLY PID=", "")
        # Hello answered : cancel its timeout
        self._unschedule_client_helloservertimeout()
        # Count the reply and the hello round-trip delay
        Meters.aii("ping.client.client_hello_server_reply")
        Meters.dtci("ping.client.delay_client_hello_toserver_reply", SolBase.datediff(self.dt_hello_send))
        # Start the ping loop (randomized first schedule)
        self._schedule_clientping(True)
def _protocol_server_ping_client_reply(self):
    """
    A client ping has been replied
    """
    with self._protocolLock:
        if self._pingTimeOutGreenlet is not None:
            # A ping is in flight : cancel its timeout, then arm the next ping
            self._unschedule_server_ping_client_timeout()
            self._schedule_server_ping()
            # Count the reply and its round-trip delay
            Meters.aii("ping.server.serverPingClientReply")
            Meters.dtci("ping.server.client_ping_server_reply", SolBase.datediff(self.dtLastPingSend))
        else:
            # Reply received with no ping in flight
            Meters.aii("ping.server.serverPingServerClientReplyNoPingOngoing")
def _watchdog_run(self):
    """
    Watch dog
    Periodic maintenance pass : refreshes gauge meters, evicts expired keys,
    then reschedules itself unless the cache is stopped (or the unittest
    callback vetoes the next run).
    :return Nothing
    """
    # Stopped => do nothing at all
    if not self._is_started:
        return
    reschedule = True
    try:
        # Current meters (gauges refreshed on every pass)
        Meters.ai(self.meters_prefix + "mcs.cur_bytes").set(
            self._current_data_bytes.get())
        Meters.ai(self.meters_prefix + "mcs.cur_size_hash").set(
            len(self._hash_key))
        # Evict
        ms = SolBase.mscurrent()
        evicted_count = self._evict_all_expired_keys()
        # Check (evict can take some time) : stop may have occurred meanwhile.
        # NOTE: this early return still executes the finally block below, and
        # skips the rescheduling at the bottom of the method.
        if not self._is_started:
            return
        Meters.dtci(self.meters_prefix + "mcs.cache_dtc_watchdog",
                    SolBase.msdiff(ms))
        # Stat (only when something was actually evicted)
        if evicted_count > 0:
            Meters.aii(self.meters_prefix + "mcs.cache_evict_ttl_watchdog",
                       evicted_count)
        # Callback (unittest) : its return value decides whether to reschedule
        if self._cb_watchdog:
            reschedule = self._cb_watchdog(evicted_count)
    except Exception as e:
        if self._is_started:
            # Unexpected while running : log loud and count
            logger.error("_watchdog_run : Exception, id=%s, e=%s",
                         id(self), SolBase.extostr(e))
            Meters.aii(self.meters_prefix + "mcs.cache_ex")
        else:
            # Expected noise during shutdown : log quiet, do not reschedule.
            # NOTE(review): flattened source is ambiguous here — reschedule=False
            # may originally have applied to EVERY exception, not only the
            # stopped case; confirm against upstream history.
            logger.debug("_watchdog_run : Exception, id=%s, e=%s",
                         id(self), SolBase.extostr(e))
            reschedule = False
    finally:
        # Run counter is incremented on every pass, even on early return
        Meters.aii(self.meters_prefix + "mcs.cache_watchdog_run_count")
    # Schedule next write
    if reschedule and self._is_started:
        self._schedule_next_watchdog()
def remove(self, key):
    """
    Remove a key from cache.
    :param key: Any key
    :type key: str
    """
    write_start_ms = SolBase.mscurrent()
    try:
        if not isinstance(key, (bytes, str)):
            raise Exception("Key must be (bytes, str)")
        # Deletions go through the write redis
        self._write_redis.delete(key)
    except Exception as e:
        logger.warning("Exception, ex=%s", SolBase.extostr(e))
        Meters.aii(self.meters_prefix + "rcs.cache_ex")
    finally:
        # Write latency is recorded whatever the outcome
        Meters.dtci(self.meters_prefix + "rcs.cache_dtc_write", SolBase.msdiff(write_start_ms))
def _meters_inject(cls, count):
    """
    Inject meters for chunk test
    :param count: int
    :type count: int
    """
    # Start from a clean slate
    Meters.reset()
    for i in range(count):
        # 10 tags per injected meter
        d_tags = {
            "TAG_aaaaaaaaaaaaaaaaa_%s_%s" % (i, k): "VAL_aaaaaaaaaaaaaaaaa_%s_%s" % (i, k)
            for k in range(10)
        }
        # One counter per index
        Meters.aii("k.meters.aii_udp_check_%s" % i, tags=d_tags)
        # One dtc per index, hitting several buckets
        for ms in (0, 10, 100, 500, 5000, 10000, 60000):
            Meters.dtci("k.meters.dtci_udp_check_%s" % i, ms, tags=d_tags)
def test_meters(self):
    """
    Test atomic int / atomic float / delay-to-count meters basic behavior.
    """
    # Atomic int : same name => same instance, increments accumulate
    ai1a = Meters.ai("ai1")
    self.assertIsInstance(ai1a, AtomicIntSafe)
    ai1b = Meters.ai("ai1")
    self.assertEqual(id(ai1a), id(ai1b))
    ai1a = Meters.aii("ai1")
    self.assertEqual(ai1a.get(), 1)
    ai1a = Meters.aii("ai1", 2)
    self.assertEqual(ai1a.get(), 3)
    self.assertEqual(ai1a.get(), Meters.aig("ai1"))
    # Atomic float : same contract as atomic int
    af1a = Meters.af("af1")
    self.assertIsInstance(af1a, AtomicFloatSafe)
    af1b = Meters.af("af1")
    self.assertEqual(id(af1a), id(af1b))
    af1a = Meters.afi("af1")
    self.assertEqual(af1a.get(), 1.0)
    af1a = Meters.afi("af1", 2.0)
    self.assertEqual(af1a.get(), 3.0)
    self.assertEqual(af1a.get(), Meters.afg("af1"))
    # Delay-to-count : each recorded delay lands in its bucket
    dtc1a = Meters.dtc("dtc1")
    self.assertIsInstance(dtc1a, DelayToCountSafe)
    dtc1b = Meters.dtc("dtc1")
    self.assertEqual(id(dtc1a), id(dtc1b))
    Meters.dtci("dtc1", 0)
    Meters.dtci("dtc1", 50)
    Meters.dtci("dtc1", 100)
    # FIX: assertEquals is a deprecated alias removed in Python 3.12 => assertEqual
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[0].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[50].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[100].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[500].get(), 0)
    # Must not raise
    Meters.dtc("dtc1").to_dict()
    # Write
    Meters.write_to_logger()
def test_meters_to_udp(self):
    """
    Test meters basic behavior, udp serialization and the udp scheduler.
    """
    # Atomic int : same name => same instance, increments accumulate
    ai1a = Meters.ai("ai1")
    self.assertIsInstance(ai1a, AtomicIntSafe)
    ai1b = Meters.ai("ai1")
    self.assertEqual(id(ai1a), id(ai1b))
    ai1a = Meters.aii("ai1")
    self.assertEqual(ai1a.get(), 1)
    ai1a = Meters.aii("ai1", 2)
    self.assertEqual(ai1a.get(), 3)
    self.assertEqual(ai1a.get(), Meters.aig("ai1"))
    # Atomic float : same contract as atomic int
    af1a = Meters.af("af1")
    self.assertIsInstance(af1a, AtomicFloatSafe)
    af1b = Meters.af("af1")
    self.assertEqual(id(af1a), id(af1b))
    af1a = Meters.afi("af1")
    self.assertEqual(af1a.get(), 1.0)
    af1a = Meters.afi("af1", 2.0)
    self.assertEqual(af1a.get(), 3.0)
    self.assertEqual(af1a.get(), Meters.afg("af1"))
    # Delay-to-count buckets
    dtc1a = Meters.dtc("dtc1")
    self.assertIsInstance(dtc1a, DelayToCountSafe)
    dtc1b = Meters.dtc("dtc1")
    self.assertEqual(id(dtc1a), id(dtc1b))
    Meters.dtci("dtc1", 0)
    Meters.dtci("dtc1", 50)
    Meters.dtci("dtc1", 100)
    # FIX: assertEquals is a deprecated alias removed in Python 3.12 => assertEqual
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[0].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[50].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[100].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[500].get(), 0)
    # Write
    Meters.write_to_logger()
    # Serialize
    ar_json = Meters.meters_to_udp_format(send_pid=True, send_dtc=True)
    logger.info("Got ar_json=%s", ar_json)
    for cur_ar in ar_json:
        logger.info("Got cur_ar=%s", cur_ar)
    # Serialize, no dtc
    ar_json = Meters.meters_to_udp_format(send_pid=True, send_dtc=False)
    logger.info("Got ar_json=%s", ar_json)
    for cur_ar in ar_json:
        logger.info("Got cur_ar=%s", cur_ar)
    # Send to daemon (assuming its up locally)
    Meters.send_udp_to_knockdaemon()
    Meters.send_udp_to_knockdaemon(send_dtc=True)
    Meters.send_udp_to_knockdaemon(send_dtc=False)
    # ------------------------
    # UDP Scheduler test
    # ------------------------
    # Check initial state
    self.assertIsNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertFalse(Meters.UDP_SCHEDULER_STARTED)
    # Start
    Meters.udp_scheduler_start(send_interval_ms=500)
    # Check
    self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertTrue(Meters.UDP_SCHEDULER_STARTED)
    # Start again (must be idempotent)
    Meters.udp_scheduler_start(send_interval_ms=500)
    # Check again
    self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertTrue(Meters.UDP_SCHEDULER_STARTED)
    # Interval is 500 => we sleep 3.250 sec, we assume we must have at least 500, 1000, 1500, 2000, 2500, 3000 run => 6 runs
    SolBase.sleep(3250)
    # Check
    self.assertGreaterEqual(Meters.aig("k.meters.udp.run.ok"), 6)
    self.assertEqual(Meters.aig("k.meters.udp.run.ex"), 0)
    self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertTrue(Meters.UDP_SCHEDULER_STARTED)
    # We stop
    Meters.udp_scheduler_stop()
    self.assertIsNone(Meters.UDP_SCHEDULER_GREENLET)
    self.assertFalse(Meters.UDP_SCHEDULER_STARTED)
    # Sleep again and check no more running
    cur_run = Meters.aig("k.meters.udp.run.ok")
    SolBase.sleep(2000)
    self.assertEqual(cur_run, Meters.aig("k.meters.udp.run.ok"))
def test_meters_with_tags_b(self):
    """
    Test tagged meters with two distinct tag keys (flag1 / flag2).
    """
    hca = Meters._tags_hash_compute_and_store({"flag1": "FA"})
    hcb = Meters._tags_hash_compute_and_store({"flag2": "FB"})
    # Counter : untagged => 1, flag1=FA => 2, flag2=FB => 3
    Meters.aii("ai1")
    Meters.aii("ai1", tags={"flag1": "FA"})
    Meters.aii("ai1", tags={"flag1": "FA"})
    Meters.aii("ai1", tags={"flag2": "FB"})
    Meters.aii("ai1", tags={"flag2": "FB"})
    Meters.aii("ai1", tags={"flag2": "FB"})
    # Untagged dtc
    Meters.dtci("dtc1", 0)
    Meters.dtci("dtc1", 50)
    Meters.dtci("dtc1", 100)
    # FIX: assertEquals is a deprecated alias removed in Python 3.12 => assertEqual
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[0].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[50].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[100].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[500].get(), 0)
    # Tagged dtc : flag1=FA once per bucket, flag2=FB twice per bucket
    Meters.dtci("dtc1", 0, tags={"flag1": "FA"})
    Meters.dtci("dtc1", 50, tags={"flag1": "FA"})
    Meters.dtci("dtc1", 100, tags={"flag1": "FA"})
    Meters.dtci("dtc1", 0, tags={"flag2": "FB"})
    Meters.dtci("dtc1", 50, tags={"flag2": "FB"})
    Meters.dtci("dtc1", 100, tags={"flag2": "FB"})
    Meters.dtci("dtc1", 0, tags={"flag2": "FB"})
    Meters.dtci("dtc1", 50, tags={"flag2": "FB"})
    Meters.dtci("dtc1", 100, tags={"flag2": "FB"})
    # Counters per tag hash
    self.assertEqual(Meters._hash_meter["a_int"]["ai1#"].get(), 1)
    self.assertEqual(Meters._hash_meter["a_int"]["ai1#" + hca].get(), 2)
    self.assertEqual(Meters._hash_meter["a_int"]["ai1#" + hcb].get(), 3)
    # Dtc buckets per tag hash
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hca]._sorted_dict[0].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hca]._sorted_dict[50].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hca]._sorted_dict[100].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hca]._sorted_dict[500].get(), 0)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hcb]._sorted_dict[0].get(), 2)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hcb]._sorted_dict[50].get(), 2)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hcb]._sorted_dict[100].get(), 2)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hcb]._sorted_dict[500].get(), 0)
    # Write
    Meters.write_to_logger()
def test_meters_with_tags_a_with_udp_check(self):
    """
    Test tagged meters (single tag key "flag") and their udp serialization.
    """
    hca = Meters._tags_hash_compute_and_store({"flag": "FA"})
    hcb = Meters._tags_hash_compute_and_store({"flag": "FB"})
    # Counter : untagged => 1, flag=FA => 2, flag=FB => 3
    Meters.aii("ai1")
    Meters.aii("ai1", tags={"flag": "FA"})
    Meters.aii("ai1", tags={"flag": "FA"})
    Meters.aii("ai1", tags={"flag": "FB"})
    Meters.aii("ai1", tags={"flag": "FB"})
    Meters.aii("ai1", tags={"flag": "FB"})
    # Untagged dtc
    Meters.dtci("dtc1", 0)
    Meters.dtci("dtc1", 50)
    Meters.dtci("dtc1", 100)
    # FIX: assertEquals is a deprecated alias removed in Python 3.12 => assertEqual
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[0].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[50].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[100].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[500].get(), 0)
    # Tagged dtc : flag=FA once per bucket, flag=FB twice per bucket
    Meters.dtci("dtc1", 0, tags={"flag": "FA"})
    Meters.dtci("dtc1", 50, tags={"flag": "FA"})
    Meters.dtci("dtc1", 100, tags={"flag": "FA"})
    Meters.dtci("dtc1", 0, tags={"flag": "FB"})
    Meters.dtci("dtc1", 50, tags={"flag": "FB"})
    Meters.dtci("dtc1", 100, tags={"flag": "FB"})
    Meters.dtci("dtc1", 0, tags={"flag": "FB"})
    Meters.dtci("dtc1", 50, tags={"flag": "FB"})
    Meters.dtci("dtc1", 100, tags={"flag": "FB"})
    # Counters per tag hash
    self.assertEqual(Meters._hash_meter["a_int"]["ai1#"].get(), 1)
    self.assertEqual(Meters._hash_meter["a_int"]["ai1#" + hca].get(), 2)
    self.assertEqual(Meters._hash_meter["a_int"]["ai1#" + hcb].get(), 3)
    # Dtc buckets per tag hash
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hca]._sorted_dict[0].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hca]._sorted_dict[50].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hca]._sorted_dict[100].get(), 1)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hca]._sorted_dict[500].get(), 0)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hcb]._sorted_dict[0].get(), 2)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hcb]._sorted_dict[50].get(), 2)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hcb]._sorted_dict[100].get(), 2)
    self.assertEqual(Meters._hash_meter["dtc"]["dtc1#" + hcb]._sorted_dict[500].get(), 0)
    # Write
    Meters.write_to_logger()
    # Udp check : every expected (key, tags, value) must appear exactly once
    ar_udp = Meters.meters_to_udp_format(send_pid=True, send_tags=True, send_dtc=True)
    total_check = 0
    total_ok = 0
    total_ko_not_found = 0
    total_ko_multiple_found = 0
    for s_key, d_tag, v in [
        ("ai1", {}, 1),
        ("ai1", {"flag": "FA", }, 2),
        ("ai1", {"flag": "FB", }, 3),
        ("dtc1_0-50", {}, 1),
        ("dtc1_50-100", {}, 1),
        ("dtc1_100-500", {}, 1),
        ("dtc1_500-1000", {}, 0),
        ("dtc1_1000-2500", {}, 0),
        ("dtc1_2500-5000", {}, 0),
        ("dtc1_5000-10000", {}, 0),
        ("dtc1_10000-30000", {}, 0),
        ("dtc1_30000-60000", {}, 0),
        ("dtc1_60000-MAX", {}, 0),
        ("dtc1_0-50", {"flag": "FA", }, 1),
        ("dtc1_50-100", {"flag": "FA", }, 1),
        ("dtc1_100-500", {"flag": "FA", }, 1),
        ("dtc1_500-1000", {"flag": "FA", }, 0),
        ("dtc1_1000-2500", {"flag": "FA", }, 0),
        ("dtc1_2500-5000", {"flag": "FA", }, 0),
        ("dtc1_5000-10000", {"flag": "FA", }, 0),
        ("dtc1_10000-30000", {"flag": "FA", }, 0),
        ("dtc1_30000-60000", {"flag": "FA", }, 0),
        ("dtc1_60000-MAX", {"flag": "FA", }, 0),
        ("dtc1_0-50", {"flag": "FB", }, 2),
        ("dtc1_50-100", {"flag": "FB", }, 2),
        ("dtc1_100-500", {"flag": "FB", }, 2),
        ("dtc1_500-1000", {"flag": "FB", }, 0),
        ("dtc1_1000-2500", {"flag": "FB", }, 0),
        ("dtc1_2500-5000", {"flag": "FB", }, 0),
        ("dtc1_5000-10000", {"flag": "FB", }, 0),
        ("dtc1_10000-30000", {"flag": "FB", }, 0),
        ("dtc1_30000-60000", {"flag": "FB", }, 0),
        ("dtc1_60000-MAX", {"flag": "FB", }, 0),
    ]:
        total_check += 1
        # Locate it
        found = 0
        for check_key, check_tag, check_v, _, _ in ar_udp:
            if check_key == s_key and check_tag == d_tag and check_v == v:
                found += 1
        # Check
        if found == 0:
            total_ko_not_found += 1
        elif found > 1:
            total_ko_multiple_found += 1
        else:
            total_ok += 1
    # Final
    # BUGFIX: original asserted total_ko_multiple_found twice and never
    # asserted total_ko_not_found (copy-paste error).
    self.assertEqual(total_ko_not_found, 0)
    self.assertEqual(total_ko_multiple_found, 0)
    self.assertEqual(total_ok, total_check)
    self.assertEqual(len(ar_udp), total_check)
def _read_loop_internal(self):
    """
    Low level read loop on socket.
    Runs until __is_running() goes False : handles the (currently disabled)
    SSL handshake phase, then waits for the socket and dispatches received
    buffers to self._callback_receive.
    """
    logger.debug("entering now, self=%s", self)
    try:
        while self.__is_running():
            try:
                if self.__ssl_handshake_pending:
                    # Pending SSL handshake + received something
                    # Handle SSL handshake now
                    # Stats : delay between accept and handshake start
                    Meters.dtci("tcp.server.delay_server_accept_to_sslhandshakestart", SolBase.datediff(self._dt_created))
                    # Timestamps
                    self._dt_last_recv = SolBase.datecurrent()
                    # Debug ONLY : optionally force the handshake timeout to fire
                    if self.__ssl_wait_debug_ms:
                        logger.warning("DEBUG : forcing a wait for SSL handshake timeout, ms=%s, self=%s", self.__ssl_wait_debug_ms, self)
                        SolBase.sleep(self.__ssl_wait_debug_ms)
                        logger.warning("DEBUG : forcing a wait for SSL handshake timeout, done, self=%s", self)
                    # Do the handshake
                    # TODO : gevent 1.3 : This is now broken due to underlying _sslobj None. To be checked. SSL support current disable.
                    # NOTE: this raise makes everything below it in this branch
                    # unreachable until SSL support is re-enabled.
                    raise Exception("SSL Support currently disabled")
                    # noinspection PyUnreachableCode
                    self.current_socket.do_handshake()
                    # Done, cancel timeout
                    self._unschedule_ssl_handshake_timeout()
                    # Handshake duration
                    ms = SolBase.datediff(self._dt_last_recv)
                    # SSL Stats (for client)
                    self._set_ssl_handshake_ms(ms)
                    # Server stats
                    Meters.dtci("tcp.server.delay_server_sslhandshake", ms)
                    # Done
                    self.__ssl_handshake_pending = False
                    # Non blocking mode now
                    self.current_socket.setblocking(0)
                    self.current_socket.settimeout(None)
                    # Reloop in normal mode
                    continue
                # Wait for socket to be available for read
                ok = self._wait_for_socket_recv()
                if not ok:
                    # This is not really normal
                    logger.warning("_wait_for_socket_recv returned False, self=%s", self)
                elif not self.__is_running():
                    logger.debug("_wait_for_socket_recv returned True, __is_running()==False, exiting, self=%s", self)
                    return
                else:
                    # Something to read...
                    local_buf = self._read_from_socket()
                    if not self.__is_running():
                        logger.debug("_read_from_socket returned, __is_running()==False, exiting, self=%s", self)
                    elif local_buf is None:
                        # This is not really normal
                        logger.debug("_read_from_socket returned None, self=%s", self)
                    elif len(local_buf) == 0:
                        # Empty read => peer closed : disconnect ourselves
                        logger.debug("_read_from_socket returned empty string, self=%s", self)
                        # Gevent 1.0.2 : call disconnect
                        self._disconnect_helper("_read_from_socket returned empty string")
                    else:
                        # Timestamps
                        self._dt_last_recv = SolBase.datecurrent()
                        # Notify
                        if self._callback_receive:
                            self._callback_receive(local_buf)
                        else:
                            logger.error("_callback_receive is None, check you implementation, self=%s", self)
                # Next read : yield to the gevent hub between iterations.
                # NOTE(review): original indentation lost in flattening — this
                # sleep may have been inside the else branch only; confirm
                # against upstream.
                SolBase.sleep(0)
            except Exception as e:
                # Per-iteration errors keep the loop alive
                logger.warning("IN_LOOP Exception raised, ex=%s, self=%s", SolBase.extostr(e), self)
    except Exception as e:
        # Loop-level failure : exit the read loop
        logger.error("METHOD Exception raised, ex=%s, self=%s", SolBase.extostr(e), self)
    finally:
        logger.debug("exiting now, self=%s", self)
        SolBase.sleep(0)
def stop_synch_internal(self):
    """
    Stop processing our socket read/write.
    Reserved for PURE in-memory stop operations (greenlet stop, counter put mainly)
    NEVER, NEVER perform any kind of non-memory operations here.
    For instance, are FORBIDDEN in higher level implementation of stop_synch_internal :
    - Any socket send/recv
    - Any external queries (redis/mongo, whatever)
    JUST HANDLE IN MEMORY STUFF.
    :return True if success, false otherwise.
    :rtype bool
    """
    try:
        logger.debug(
            "TcpServerClientContext : disconnect : entering, self=%s", self)
        # Check : already disconnected => no-op.
        # NOTE(review): the finally block below still records a session
        # duration stat even on this no-op path — confirm this is intended.
        if not self.is_connected:
            logger.debug(
                "TcpServerClientContext : disconnect : not connected, doing nothing, self=%s", self)
            return False
        # Signal (move on top, try to avoid some TcpManager warn logs while stopping)
        self.is_connected = False
        # Timeout unschedule
        self._unschedule_ssl_handshake_timeout()
        # Control unschedule
        self._unschedule_control_greenlet()
        # Disconnect
        # Close the socket in this case (should not cover mantis 1173)
        SolBase.safe_close_socket(self.current_socket)
        self.current_socket = None
        # Greenlet reset after is_connected=False (will help to exit itself)
        if self._read_greenlet:
            self._read_greenlet.kill()
            self._read_greenlet = None
        if self._write_greenlet:
            self._write_greenlet.kill()
            self._write_greenlet = None
        # Flush out the send queue now, and decrement pending bytes to send
        total_len = 0
        while True:
            try:
                item = self.send_queue.get(False)
                if isinstance(item, bytes):
                    total_len += len(item)
                elif isinstance(item, SignaledBuffer):
                    total_len += len(item.binary_buffer)
            except Empty:
                # Queue drained
                break
        # Decrement the global pending-send counter by what we dropped
        logger.debug(
            "TcpServerClientContext : disconnect : decrementing, total_len=%s", total_len)
        Meters.aii("tcp.server.server_bytes_send_pending", -total_len)
        # Over
        logger.debug("TcpServerClientContext : disconnect : done, self=%s", self)
        return True
    except Exception as e:
        logger.error(
            "TcpServerClientContext : disconnect : Exception, ex=%s, self=%s", SolBase.extostr(e), self)
        return False
    finally:
        # Session duration stats (recorded whatever the outcome)
        sec = SolBase.datediff(self._dt_created) / 1000
        Meters.dtci("tcp.server.session_duration_second", sec)