Example #1
    def close_all(self):
        """
        Close all connections
        """
        n = 0
        while not self.pool.empty():
            conn = self.pool.get_nowait()
            self._connection_close(conn)
            n += 1

        Meters.aii("k.db_pool.base.cur_size", increment_value=-n)
        Meters.ai("k.db_pool.base.max_size").set(max(Meters.aig("k.db_pool.base.max_size"), Meters.aig("k.db_pool.base.cur_size")))
        self.size = 0
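
The method above pairs the teardown loop with two meter updates: a negative increment of the current-size counter and a refresh of the max-size high-water mark. A minimal standalone sketch of that bookkeeping pattern, assuming pysolmeters is installed; the import path is an assumption, not taken from the example:

from pysolmeters.Meters import Meters  # assumed import path

# Pretend we just closed 3 pooled connections (illustrative value)
n = 3

# Decrement the current pool size counter by n
Meters.aii("k.db_pool.base.cur_size", increment_value=-n)

# Keep max_size as the high-water mark of cur_size
Meters.ai("k.db_pool.base.max_size").set(
    max(Meters.aig("k.db_pool.base.max_size"),
        Meters.aig("k.db_pool.base.cur_size")))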
Example #2
    def _watchdog_run(self):
        """
        Watchdog run: evict expired keys, update meters and reschedule.
        :return: Nothing
        """

        if not self._is_started:
            return

        reschedule = True
        try:
            # Current meters
            Meters.ai(self.meters_prefix + "mcs.cur_bytes").set(
                self._current_data_bytes.get())
            Meters.ai(self.meters_prefix + "mcs.cur_size_hash").set(
                len(self._hash_key))

            # Evict
            ms = SolBase.mscurrent()
            evicted_count = self._evict_all_expired_keys()

            # Check (evict can take some time)
            if not self._is_started:
                return

            Meters.dtci(self.meters_prefix + "mcs.cache_dtc_watchdog",
                        SolBase.msdiff(ms))

            # Stat
            if evicted_count > 0:
                Meters.aii(self.meters_prefix + "mcs.cache_evict_ttl_watchdog",
                           evicted_count)

            # Callback (unittest)
            if self._cb_watchdog:
                reschedule = self._cb_watchdog(evicted_count)

        except Exception as e:
            if self._is_started:
                logger.error("_watchdog_run : Exception, id=%s, e=%s",
                             id(self), SolBase.extostr(e))
                Meters.aii(self.meters_prefix + "mcs.cache_ex")
            else:
                logger.debug("_watchdog_run : Exception, id=%s, e=%s",
                             id(self), SolBase.extostr(e))
                reschedule = False
        finally:
            Meters.aii(self.meters_prefix + "mcs.cache_watchdog_run_count")
            # Schedule next write
            if reschedule and self._is_started:
                self._schedule_next_watchdog()
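
The watchdog times its eviction pass with SolBase.mscurrent() / SolBase.msdiff() and feeds the elapsed milliseconds into a delay-to-count meter via Meters.dtci. A minimal sketch of that timing pattern, assuming the usual pysolbase / pysolmeters import paths:

from pysolbase.SolBase import SolBase  # assumed import path
from pysolmeters.Meters import Meters  # assumed import path

# Start of the timed section, in milliseconds
ms = SolBase.mscurrent()

# ... work to be timed (placeholder) ...
SolBase.sleep(10)

# Record the elapsed milliseconds into a delay-to-count meter
Meters.dtci("mcs.cache_dtc_watchdog", SolBase.msdiff(ms))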
Example #3
    def connection_acquire(self):
        """
        Get a connection
        # TODO : In case the client cannot release (greenlet kill) : add a spawn_later to protect against pool exhaustion (+ kill the connection in this case)
        # TODO : this requires a timeout by config (let's say 60 sec by default)
        :return: object
        :rtype: object
        """

        with self.pool_lock:

            Meters.aii("k.db_pool.base.call.connection_acquire")

            if self.pool.qsize() > 0:
                # ------------------------------
                # GET CONNECTION FROM POOL
                # ------------------------------
                conn = self.pool.get()

                # Ping it
                if not self._connection_ping(conn):
                    # Failed => close it
                    self._connection_close(conn)

                    # Re-create a new one (we just closed a connection)
                    conn = self._connection_create()

                # Send it back
                return conn
            elif self.size >= self.max_size:
                # ------------------------------
                # POOL MAXED => ERROR
                # ------------------------------
                Meters.aii("k.db_pool.base.pool_maxed")
                raise Exception("Pool maxed, size=%s, max_size=%s" % (self.size, self.max_size))
            else:
                # ------------------------------
                # POOL NOT MAXED, NO CONNECTION IN POOL => NEW CONNECTION
                # ------------------------------
                conn = self._connection_create()
                self.size += 1
                Meters.aii("k.db_pool.base.cur_size", increment_value=1)
                Meters.ai("k.db_pool.base.max_size").set(max(Meters.aig("k.db_pool.base.max_size"), Meters.aig("k.db_pool.base.cur_size")))
                return conn
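
From the caller's side, connection_acquire() either hands back a pooled (ping-checked) or freshly created connection, or raises once max_size is reached, so callers typically wrap usage in try/finally. A hedged usage sketch; the pool instance and the connection_release() counterpart are hypothetical here, only connection_acquire() appears in the example:

conn = pool.connection_acquire()
try:
    # use the connection; the actual API depends on the underlying driver
    pass
finally:
    # hypothetical release call returning the connection to the pool
    pool.connection_release(conn)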
Example #4
    def test_meters(self):
        """
        Test Meters: atomic int, atomic float and delay-to-count meters.
        """

        ai1a = Meters.ai("ai1")
        self.assertIsInstance(ai1a, AtomicIntSafe)
        ai1b = Meters.ai("ai1")
        self.assertEqual(id(ai1a), id(ai1b))

        ai1a = Meters.aii("ai1")
        self.assertEqual(ai1a.get(), 1)
        ai1a = Meters.aii("ai1", 2)
        self.assertEqual(ai1a.get(), 3)
        self.assertEqual(ai1a.get(), Meters.aig("ai1"))

        af1a = Meters.af("af1")
        self.assertIsInstance(af1a, AtomicFloatSafe)
        af1b = Meters.af("af1")
        self.assertEqual(id(af1a), id(af1b))

        af1a = Meters.afi("af1")
        self.assertEqual(af1a.get(), 1.0)
        af1a = Meters.afi("af1", 2.0)
        self.assertEqual(af1a.get(), 3.0)
        self.assertEqual(af1a.get(), Meters.afg("af1"))

        dtc1a = Meters.dtc("dtc1")
        self.assertIsInstance(dtc1a, DelayToCountSafe)
        dtc1b = Meters.dtc("dtc1")
        self.assertEqual(id(dtc1a), id(dtc1b))

        Meters.dtci("dtc1", 0)
        Meters.dtci("dtc1", 50)
        Meters.dtci("dtc1", 100)
        self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[0].get(), 1)
        self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[50].get(), 1)
        self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[100].get(), 1)
        self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[500].get(), 0)

        Meters.dtc("dtc1").to_dict()

        # Write
        Meters.write_to_logger()
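
Stripped of the unittest assertions, the Meters API surface exercised above is: ai()/aii()/aig() for atomic ints, af()/afi()/afg() for atomic floats, dtc()/dtci() for delay-to-count histograms, plus write_to_logger(). A minimal sketch; the import path is an assumption:

from pysolmeters.Meters import Meters  # assumed import path

# Atomic int: +1, then +2, then read back
Meters.aii("ai1")
Meters.aii("ai1", 2)
print(Meters.aig("ai1"))   # 3

# Atomic float: +1.0, then +2.0, then read back
Meters.afi("af1")
Meters.afi("af1", 2.0)
print(Meters.afg("af1"))   # 3.0

# Delay-to-count: record per-call latencies in milliseconds
Meters.dtci("dtc1", 50)
Meters.dtci("dtc1", 100)

# Dump all meters to the logger
Meters.write_to_logger()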
Example #5
    def test_meters_to_udp(self):
        """
        Test Meters: UDP serialization, send to knockdaemon and UDP scheduler.
        """

        ai1a = Meters.ai("ai1")
        self.assertIsInstance(ai1a, AtomicIntSafe)
        ai1b = Meters.ai("ai1")
        self.assertEqual(id(ai1a), id(ai1b))

        ai1a = Meters.aii("ai1")
        self.assertEqual(ai1a.get(), 1)
        ai1a = Meters.aii("ai1", 2)
        self.assertEqual(ai1a.get(), 3)
        self.assertEqual(ai1a.get(), Meters.aig("ai1"))

        af1a = Meters.af("af1")
        self.assertIsInstance(af1a, AtomicFloatSafe)
        af1b = Meters.af("af1")
        self.assertEqual(id(af1a), id(af1b))

        af1a = Meters.afi("af1")
        self.assertEqual(af1a.get(), 1.0)
        af1a = Meters.afi("af1", 2.0)
        self.assertEqual(af1a.get(), 3.0)
        self.assertEqual(af1a.get(), Meters.afg("af1"))

        dtc1a = Meters.dtc("dtc1")
        self.assertIsInstance(dtc1a, DelayToCountSafe)
        dtc1b = Meters.dtc("dtc1")
        self.assertEqual(id(dtc1a), id(dtc1b))

        Meters.dtci("dtc1", 0)
        Meters.dtci("dtc1", 50)
        Meters.dtci("dtc1", 100)
        self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[0].get(), 1)
        self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[50].get(), 1)
        self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[100].get(), 1)
        self.assertEqual(Meters._hash_meter["dtc"]["dtc1#"]._sorted_dict[500].get(), 0)

        # Write
        Meters.write_to_logger()

        # Serialize
        ar_json = Meters.meters_to_udp_format(send_pid=True, send_dtc=True)
        logger.info("Got ar_json=%s", ar_json)
        for cur_ar in ar_json:
            logger.info("Got cur_ar=%s", cur_ar)

        # Serialize, no dtc
        ar_json = Meters.meters_to_udp_format(send_pid=True, send_dtc=False)
        logger.info("Got ar_json=%s", ar_json)
        for cur_ar in ar_json:
            logger.info("Got cur_ar=%s", cur_ar)

        # Send to daemon (assuming it's up locally)
        Meters.send_udp_to_knockdaemon()
        Meters.send_udp_to_knockdaemon(send_dtc=True)
        Meters.send_udp_to_knockdaemon(send_dtc=False)

        # ------------------------
        # UDP Scheduler test
        # ------------------------

        # Check
        self.assertIsNone(Meters.UDP_SCHEDULER_GREENLET)
        self.assertFalse(Meters.UDP_SCHEDULER_STARTED)

        # Start
        Meters.udp_scheduler_start(send_interval_ms=500)

        # Check
        self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
        self.assertTrue(Meters.UDP_SCHEDULER_STARTED)

        # Start again
        Meters.udp_scheduler_start(send_interval_ms=500)

        # Check again
        self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
        self.assertTrue(Meters.UDP_SCHEDULER_STARTED)

        # Interval is 500 ms => we sleep 3.250 sec, so we expect runs at 500, 1000, 1500, 2000, 2500, 3000 ms => at least 6 runs
        SolBase.sleep(3250)

        # Check
        self.assertGreaterEqual(Meters.aig("k.meters.udp.run.ok"), 6)
        self.assertEqual(Meters.aig("k.meters.udp.run.ex"), 0)
        self.assertIsNotNone(Meters.UDP_SCHEDULER_GREENLET)
        self.assertTrue(Meters.UDP_SCHEDULER_STARTED)

        # We stop
        Meters.udp_scheduler_stop()
        self.assertIsNone(Meters.UDP_SCHEDULER_GREENLET)
        self.assertFalse(Meters.UDP_SCHEDULER_STARTED)

        # Sleep again and check no more running
        cur_run = Meters.aig("k.meters.udp.run.ok")
        SolBase.sleep(2000)
        self.assertEqual(cur_run, Meters.aig("k.meters.udp.run.ok"))
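
Outside the test, the UDP part boils down to a one-shot push or a periodic scheduler that is started once and stopped at shutdown. A minimal sketch, assuming a knockdaemon is listening locally on its default UDP endpoint and assuming the import paths below:

from pysolbase.SolBase import SolBase  # assumed import path
from pysolmeters.Meters import Meters  # assumed import path

# One-shot push of current meters over UDP (fire and forget)
Meters.send_udp_to_knockdaemon(send_dtc=True)

# Periodic push every 60 seconds, until stopped
Meters.udp_scheduler_start(send_interval_ms=60000)
SolBase.sleep(2000)            # ... application runs ...
Meters.udp_scheduler_stop()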