Example #1
    def __init__(self, balancer, reset_vm_callback, new_session_callback):
        # keeps track of what remote IP a server is servicing
        # False means there is a fresh VM ready for a new session
        # hosts are removed from this dict entirely during re-imaging
        self.srv_states = {}

        # TODO: lazy design, this should really be part of srv_states but then False can't
        # be used to denote a fresh VM
        self.user_datas = {}

        # maps remote IPs to internal IPs
        self.remote_sessions = {}

        # remote IPs that have connected recently
        self.temp_blacklist = set()

        # make sure srv_states and remote_sessions accesses stay coherent
        self.lock = aiorwlock.RWLock()

        # I noticed sometimes multiple connections would print the msg multiple
        # times, so this flag prevents the callback from being run more than once
        self.has_session_callbacked = False

        self.reset_vm_callback = reset_vm_callback
        self.new_session_callback = new_session_callback
        self.balancer = balancer
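The class above only declares its shared maps and the RWLock that keeps them coherent. As a hedged illustration (not part of the original class; lookup_session and assign_session are hypothetical helpers), reads of remote_sessions would go through the reader lock and mutations through the writer lock:

    async def lookup_session(self, remote_ip):
        # Hypothetical helper: many lookups may proceed concurrently
        # under the shared reader lock.
        async with self.lock.reader_lock:
            return self.remote_sessions.get(remote_ip)

    async def assign_session(self, remote_ip, internal_ip):
        # Hypothetical helper: mutations take the writer lock,
        # excluding all readers and other writers.
        async with self.lock.writer_lock:
            self.remote_sessions[remote_ip] = internal_ip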
Example #2
    def __init__(self, dagMetaMfsPath, dagMetaHistoryMax=12, offline=False,
                 unpinOnUpdate=False, autoPreviousNode=True,
                 cipheredMeta=False,
                 autoUpdateDates=False, loop=None,
                 portalCacheTtl=60):
        super().__init__()

        self.loop = loop if loop else asyncio.get_event_loop()
        self.lock = aiorwlock.RWLock(loop=loop)
        self.loaded = asyncio.Future()
        self.portalCache = TTLCache(1, portalCacheTtl)

        self._curMetaEntry = None
        self._dagRoot = None
        self._dagMeta = None
        self._dagCid = None
        self._offline = offline
        self._dagMetaMaxHistoryItems = dagMetaHistoryMax
        self._dagMetaMfsPath = dagMetaMfsPath
        self._unpinOnUpdate = unpinOnUpdate
        self._autoPrevious = autoPreviousNode
        self._autoUpdateDates = autoUpdateDates
        self._cipheredMeta = cipheredMeta

        self.dagUpdated = AsyncSignal(str)
        self.available = AsyncSignal(object)

        self.changed.connect(lambda: ensure(self.ipfsSave()))
Example #3
    def __init__(self, channel, psService, gWindow, sticky=False):
        super(ChatRoomWidget, self).__init__(gWindow, sticky=sticky)

        self.lock = aiorwlock.RWLock()
        self.chans = ChatChannels()

        self.pSub = psSubscriber(f'chatroom.{channel}')
        self.pSub.add_async_listener(keyChatChanUserList,
                                     self.onChatChanUserList)
        self.pSub.add_async_listener(makeKeyPubChatTokens(channel),
                                     self.onChatTokenMessage)

        self.participantsModel = ParticipantsModel(self)
        self.participantsModel.setHorizontalHeaderLabels(
            ['Token', 'SpaceHandle'])

        self.chatView = ChatWebView(self)

        self.channel = channel
        self.subscriber = psSubscriber(psService.topic)
        self.psService = psService
        self.subscriber.add_async_listener(self.psService.psKey,
                                           self.onChatMessageReceived)
        self.chatWidget = QWidget()
        self.vLayout.addWidget(self.chatWidget)

        self.ui = ui_chatroom.Ui_Form()
        self.ui.setupUi(self.chatWidget)
        self.ui.hLayoutTop.addWidget(self.chatView)

        self.ui.hideUsersButton.clicked.connect(self.onHideUsers)

        self.ui.usersView.setModel(self.participantsModel)
        self.ui.usersView.hideColumn(0)
Example #4
class ThinkAioKafkaProducer(object):
    g_dictConnPool = {}
    g_rwlock = aiorwlock.RWLock()

    @classmethod
    async def send(cls, szHost, szTopic, szMsg):
        if cls.g_dictConnPool.get(szHost) is None:
            async with cls.g_rwlock.writer:
                if cls.g_dictConnPool.get(szHost) is None:
                    try:
                        producer = AIOKafkaProducer(
                            loop=asyncio.get_event_loop(),
                            bootstrap_servers=szHost)
                        await producer.start()

                        cls.g_dictConnPool[szHost] = producer
                    except Exception as e:
                        await g_aio_logger.error(e)

        producer = cls.g_dictConnPool.get(szHost)
        if producer is None:
            return -1

        try:
            # Produce message
            await producer.send_and_wait(szTopic, szMsg.encode("utf-8"))
            return len(szMsg)
        except Exception as e:
            # drop the broken producer from the pool and report the failure
            await g_aio_logger.error(e)
            del cls.g_dictConnPool[szHost]
            await producer.stop()
            return -1
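Examples #4 through #7 all share the same check-then-lock-then-recheck idiom around a class-level RWLock. A minimal standalone sketch of that idiom, assuming a hypothetical factory coroutine make_resource:

import aiorwlock

_pool = {}
_rwlock = aiorwlock.RWLock()

async def get_or_create(key, make_resource):
    # Fast path: a plain dict read needs no lock in asyncio's
    # single-threaded model; the lock only serializes creation.
    if _pool.get(key) is None:
        async with _rwlock.writer_lock:
            # Re-check after acquiring: another coroutine may have
            # created the resource while this one awaited the lock.
            if _pool.get(key) is None:
                _pool[key] = await make_resource(key)
    return _pool.get(key)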
Example #5
async def go():
    rwlock = aiorwlock.RWLock()

    async with rwlock.writer:
        print("inside writer: only one writer is possible")

    async with rwlock.reader:
        print("inside reader: multiple reader possible")
Example #6
class ThinkAioMysql(object):
    g_dictConnPool = {}
    g_rwlock = aiorwlock.RWLock()

    @classmethod
    async def get_conn_pool_ex(cls, szGroup="mysql"):
        ret = await cls.get_conn_pool(
            host=ThinkConfig.get_default_config().get(szGroup, "host"),
            port=int(ThinkConfig.get_default_config().get(szGroup, "port")),
            user=ThinkConfig.get_default_config().get(szGroup, "user"),
            password=ThinkConfig.get_default_config().get(szGroup, "password"),
            db=ThinkConfig.get_default_config().get(szGroup, "db"),
            mincached=int(ThinkConfig.get_default_config().get_int(
                szGroup, "maxconnections") / 2),
            maxcached=int(ThinkConfig.get_default_config().get_int(
                szGroup, "maxconnections")),
            maxconnections=int(ThinkConfig.get_default_config().get(
                szGroup, "maxconnections")),
            charset="utf8",
            use_unicode=True)

        return ret

    @classmethod
    async def get_conn_pool(
            cls,
            host=ThinkConfig.get_default_config().get("mysql", "host"),
            port=int(ThinkConfig.get_default_config().get("mysql", "port")),
            user=ThinkConfig.get_default_config().get("mysql", "user"),
            password=ThinkConfig.get_default_config().get("mysql", "password"),
            db=ThinkConfig.get_default_config().get("mysql", "db"),
            mincached=int(ThinkConfig.get_default_config().get_int(
                "mysql", "maxconnections") / 2),
            maxcached=int(ThinkConfig.get_default_config().get_int(
                "mysql", "maxconnections")),
            maxconnections=int(ThinkConfig.get_default_config().get(
                "mysql", "maxconnections")),
            charset="utf8",
            use_unicode=True):
        szHostPortDb = "{}:{}-{}".format(host, port, db)

        if cls.g_dictConnPool.get(szHostPortDb) is None:
            async with cls.g_rwlock.writer:
                if cls.g_dictConnPool.get(szHostPortDb) is None:
                    connPool = await aiomysql.create_pool(
                        minsize=mincached,
                        maxsize=maxcached,
                        host=host,
                        user=user,
                        password=password,
                        db=db,
                        port=port,
                        charset=charset,
                        use_unicode=use_unicode)

                    cls.g_dictConnPool[szHostPortDb] = connPool

        return cls.g_dictConnPool.get(szHostPortDb)
Example #7
class ThinkAioRedisPool(object):

    g_dictConnPool = {}
    g_rwlock = aiorwlock.RWLock()

    @classmethod
    async def get_conn_pool_ex(cls, szGroup="redis"):
        ret = await cls.get_conn_pool(host=ThinkConfig.get_default_config().get(szGroup, "host")
                            , password=ThinkConfig.get_default_config().get(szGroup, "password")
                            , port=ThinkConfig.get_default_config().get_int(szGroup, "port")
                            , db=ThinkConfig.get_default_config().get_int(szGroup, "db")
                            , max_connections=int(ThinkConfig.get_default_config().get_int(szGroup, "max_connections")))

        return ret

    @classmethod
    async def get_conn_pool(cls
                            , host=ThinkConfig.get_default_config().get("redis", "host")
                            , password=ThinkConfig.get_default_config().get("redis", "password")
                            , port=ThinkConfig.get_default_config().get_int("redis", "port")
                            , db=ThinkConfig.get_default_config().get_int("redis", "db")
                            , max_connections=int(ThinkConfig.get_default_config().get_int("redis", "max_connections"))):

        szHostPortDb = "{}:{}-{}".format(host, port, db)
        if cls.g_dictConnPool.get(szHostPortDb) is None:

            async with cls.g_rwlock.writer:
                if cls.g_dictConnPool.get(szHostPortDb) is None:
                    connPool = await cls.mk_conn_pool(host, password, port, db, max_connections)

                    cls.g_dictConnPool[szHostPortDb] = connPool

        return cls.g_dictConnPool.get(szHostPortDb)

    @classmethod
    async def mk_conn_pool(cls
                     , host='127.0.0.1'
                     , password=None
                     , port=6379
                     , db=0
                     , max_connections=16):

        szAddress = "redis://{}:{}".format(host, port)
        _conn_pool = await aioredis.create_pool(szAddress, db=db, password=password, minsize=2, maxsize=max_connections)
        return _conn_pool


# async def main():
#     # conn_pool = await ThinkAioRedisPool.get_default_conn_pool()
#     with await (await ThinkAioRedisPool.get_conn_pool_ex()) as conn:
#         await conn.execute('set', 'fxxxxk', get_current_time_str())
#
#         szVal = await conn.execute("get", "fxxxxk")
#         print("return val: ", szVal.decode())
#
# if __name__ == '__main__':
#     asyncio.get_event_loop().run_until_complete(main())
Example #8
    async def cnt_dec(self, request):
        """
        dec count interface
        :param request:
        :return:
        """
        post = await request.post()
        logging.info('post %s', post)
        company_name = post.get("company")
        cnt = int(post.get("cnt", 0))
        rwlock = self.company_lock.get(company_name, "")
        if not rwlock:
            # rwlock = locks.Lock(loop=self.loop)
            rwlock = aiorwlock.RWLock(loop=self.loop)
            self.company_lock[company_name] = rwlock
        # with await rwlock:
        async with rwlock.writer:
            uuid_s = uuid.uuid1().hex
            logging.debug("[%s]---[%s]", uuid_s, id(rwlock))
            msg = dict()
            sql = "select * from shield.company where name=%s"
            po = await self.db.get(sql, company_name)
            if not po:  # company not found
                logging.error("not found company name [%s]", company_name)
                msg["code"] = 404
                msg["code"] = "not found company"
                return self.response(request, msg)
            po_cnt = int(po.get("count"))
            old_cnt = po.get("count")
            if po_cnt == 0:
                logging.error("company [%s] cnt is 0", company_name)
                msg["code"] = 400
                msg["reason"] = "cnt is 0"
                return self.response(request, msg)
            if po_cnt < cnt:  # database count is not enough
                logging.error("company [%s] count is not enough", company_name)
                msg["code"] = 405
                msg["reason"] = "count is not enough"
                return self.response(request, msg)
            res = po_cnt - cnt
            update_sql = "update shield.company set count=%s where name=%s"
            args_values = [res, company_name]
            update_res = await self.db.execute(update_sql, args_values)
            if not isinstance(update_res, int):  # update execution failed
                logging.error("sql update is err: %s", update_res)
                msg["code"] = 403
                msg["reason"] = "inc fail"
                return self.response(request, msg)
            logging.info(
                "uuid [%s] lock [%s] company [%s] dec cnt [%s] old cnt [%s] new cnt [%s] success",
                uuid_s, id(rwlock), company_name, cnt, old_cnt, res)

            msg["code"] = 200
            msg["reason"] = "ok"
            return self.response(request, msg)
Example #9
    def new(cls,
            path: pathlib.Path,
            logger: logging.Logger = tensorcraft.logging.internal_logger):
        self = cls()
        logger.info("Using file storage experiment engine")

        self.logger = logger
        self.rwlock = aiorwlock.RWLock()
        self.db = tinydb.TinyDB(path=path.joinpath("experiments.json"),
                                default_table="experiments")
        return self
Example #10
async def go():
    rwlock = aiorwlock.RWLock()

    # acquire reader lock
    async with rwlock.reader_lock:
        print('inside reader lock')
        await asyncio.sleep(0.1)

    # acquire writer lock
    async with rwlock.writer_lock:
        print('inside writer lock')
        await asyncio.sleep(0.1)
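The go() coroutines in these snippets are never actually driven; assuming Python 3.7 or newer, a minimal runner would be:

import asyncio

asyncio.run(go())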
Example #11
def go():
    rwlock = aiorwlock.RWLock(loop=loop)

    # acquire reader lock
    with (yield from rwlock.reader_lock):
        print("inside reader lock")

        yield from asyncio.sleep(0.1, loop=loop)

    # acquire writer lock
    with (yield from rwlock.writer_lock):
        print("inside writer lock")

        yield from asyncio.sleep(0.1, loop=loop)
Example #12
    def __init__(self, ctx):
        super().__init__()

        self.app = QApplication.instance()
        self.ctx = ctx
        self.lock = aiorwlock.RWLock()
        self.evStopWatcher = asyncio.Event()
        self._byPeerId = collections.OrderedDict()
        self._byHandle = collections.OrderedDict()
        self._didGraphLStatus = []
        self._didAuthInp = {}
        self._pgScanCount = 0

        self.peerAuthenticated.connectTo(self.onPeerAuthenticated)
Example #13
async def go():
    rwlock = aiorwlock.RWLock()

    # acquire reader lock
    await rwlock.reader_lock.acquire()
    try:
        print('inside reader lock')

        await asyncio.sleep(0.1)
    finally:
        rwlock.reader_lock.release()

    # acquire writer lock
    await rwlock.writer_lock.acquire()
    try:
        print('inside writer lock')

        await asyncio.sleep(0.1)
    finally:
        rwlock.writer_lock.release()
Example #14
def go():
    rwlock = aiorwlock.RWLock(loop=loop)

    # acquire reader lock
    yield from rwlock.reader_lock.acquire()
    try:
        print("inside reader lock")

        yield from asyncio.sleep(0.1, loop=loop)
    finally:
        rwlock.reader_lock.release()

    # acquire writer lock
    yield from rwlock.writer_lock.acquire()
    try:
        print("inside writer lock")

        yield from asyncio.sleep(0.1, loop=loop)
    finally:
        rwlock.writer_lock.release()
Example #15
    async def new(cls,
                  storage: AbstractStorage,
                  preload: bool = False,
                  logger: logging.Logger = internal_logger):
        self = cls()
        self.logger = logger
        self.storage = storage
        self.lock = aiorwlock.RWLock()
        self.models = {}

        self.storage.on_save.append(self.save_to_cache)
        self.storage.on_delete.append(self.delete_from_cache)

        if not preload:
            return self

        async for m in self.all():
            logger.info("Loading {0} model".format(m))
            await self.unsafe_load(m.name, m.tag)

        return self
Example #16
    async def cnt_set(self, request):
        """
        set count interface
        :param request:
        :return:
        """
        post = await request.post()
        logging.info('post %s', post)
        company_name = post.get("company")
        cnt = post.get("cnt")
        sql = "update shield.company set count=%s where name=%s"
        args_values = [cnt, company_name]
        rwlock = self.company_lock.get(company_name, "")
        if not rwlock:
            # rwlock = locks.Lock(loop=self.loop)
            rwlock = aiorwlock.RWLock(loop=self.loop)
            self.company_lock[company_name] = rwlock
        # with await rwlock:
        async with rwlock.writer:
            msg = dict()
            po_sql = "select * from shield.company where name=%s"
            po = await self.db.get(po_sql, company_name)
            if not po:  # company not found
                logging.error("not found company name [%s]", company_name)
                msg["code"] = 404
                msg["reason"] = "not found company"
                return self.response(request, msg)
            res = await self.db.execute(sql, args_values)
            if not isinstance(res, int):  # update execution failed
                logging.error("sql update is err: %s", res)
                msg["code"] = 403
                msg["reason"] = "set fail"
                return self.response(request, msg)
            logging.info("company [%s] set cnt [%s] success", company_name,
                         cnt)
            msg["code"] = 200
            msg["reason"] = "ok"
            return self.response(request, msg)
Example #17
class ThinkAioPG(object):
    g_dictConnPool = {}
    g_rwlock = aiorwlock.RWLock()

    @classmethod
    async def get_conn_pool_ex(cls, szGroup="postgresql"):
        ret = await cls.get_conn_pool(
            host=ThinkConfig.get_default_config().get(szGroup, "host"),
            port=int(ThinkConfig.get_default_config().get(szGroup, "port")),
            user=ThinkConfig.get_default_config().get(szGroup, "user"),
            password=ThinkConfig.get_default_config().get(szGroup, "password"),
            db=ThinkConfig.get_default_config().get(szGroup, "db"),
            mincached=int(ThinkConfig.get_default_config().get_int(
                szGroup, "maxconnections") / 2),
            maxcached=int(ThinkConfig.get_default_config().get_int(
                szGroup, "maxconnections")),
            maxconnections=int(ThinkConfig.get_default_config().get(
                szGroup, "maxconnections")),
            charset="utf8",
            use_unicode=True)

        return ret

    @classmethod
    async def get_conn_pool(cls
                            , host=ThinkConfig.get_default_config().get("postgresql", "host")
                            , port=int(ThinkConfig.get_default_config().get("postgresql", "port"))
                            , user=ThinkConfig.get_default_config().get("postgresql", "user")
                            , password=ThinkConfig.get_default_config().get("postgresql", "password")
                            , db=ThinkConfig.get_default_config().get("postgresql", "db")
                            , mincached=int(ThinkConfig.get_default_config().get_int("postgresql", "maxconnections") / 2)
                            , maxcached=int(ThinkConfig.get_default_config().get_int("postgresql", "maxconnections"))
                            , maxconnections=int(ThinkConfig.get_default_config().get("postgresql", "maxconnections"))
                            , charset="utf8"
                            , use_unicode=True):
        szHostPortDb = "{}:{}-{}".format(host, port, db)

        if cls.g_dictConnPool.get(szHostPortDb) is None:
            async with cls.g_rwlock.writer:
                if cls.g_dictConnPool.get(szHostPortDb) is None:
                    dsn = 'dbname={} user={} password={} host={} port={}'.format(db, user, password, host, port)
                    connPool = await aiopg.create_pool(
                        dsn=dsn,
                        minsize=mincached,
                        maxsize=maxcached)

                    cls.g_dictConnPool[szHostPortDb] = connPool

        return cls.g_dictConnPool.get(szHostPortDb)


# async def main():
#     with await (await ThinkAioPG.get_conn_pool_ex()) as conn:
#         cur = await conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
#
#         await cur.execute("SELECT 1 as fxxk")
#         ret = []
#         async for row in cur:
#             ret.append(dict(row))
#
#         print(ret)
#
#         # await cur.execute("INSERT INTO t_test_col(id, name, score) VALUES (%s, %s, %s)", (1024001, "Thinkman Wang", 99))
#
# if __name__ == '__main__':
#     loop = asyncio.get_event_loop()
#     loop.run_until_complete(main())
Example #18
    def __init__(
        self,
        _callable: Callable,
        deployment_name: str,
        replica_tag: ReplicaTag,
        deployment_config: DeploymentConfig,
        user_config: Any,
        version: DeploymentVersion,
        is_function: bool,
        controller_handle: ActorHandle,
    ) -> None:
        self.deployment_config = deployment_config
        self.deployment_name = deployment_name
        self.replica_tag = replica_tag
        self.callable = _callable
        self.is_function = is_function
        self.user_config = user_config
        self.version = version
        self.rwlock = aiorwlock.RWLock()

        user_health_check = getattr(_callable, HEALTH_CHECK_METHOD, None)
        if not callable(user_health_check):

            def user_health_check():
                pass

        self.user_health_check = sync_to_async(user_health_check)

        self.num_ongoing_requests = 0

        self.request_counter = metrics.Counter(
            "serve_deployment_request_counter",
            description=
            ("The number of queries that have been processed in this replica."
             ),
            tag_keys=("deployment", "replica"),
        )
        self.request_counter.set_default_tags({
            "deployment": self.deployment_name,
            "replica": self.replica_tag
        })

        self.error_counter = metrics.Counter(
            "serve_deployment_error_counter",
            description=(
                "The number of exceptions that have occurred in this replica."
            ),
            tag_keys=("deployment", "replica"),
        )
        self.error_counter.set_default_tags({
            "deployment": self.deployment_name,
            "replica": self.replica_tag
        })

        self.restart_counter = metrics.Counter(
            "serve_deployment_replica_starts",
            description=
            ("The number of times this replica has been restarted due to failure."
             ),
            tag_keys=("deployment", "replica"),
        )
        self.restart_counter.set_default_tags({
            "deployment": self.deployment_name,
            "replica": self.replica_tag
        })

        self.processing_latency_tracker = metrics.Histogram(
            "serve_deployment_processing_latency_ms",
            description="The latency for queries to be processed.",
            boundaries=DEFAULT_LATENCY_BUCKET_MS,
            tag_keys=("deployment", "replica"),
        )
        self.processing_latency_tracker.set_default_tags({
            "deployment":
            self.deployment_name,
            "replica":
            self.replica_tag
        })

        self.num_processing_items = metrics.Gauge(
            "serve_replica_processing_queries",
            description="The current number of queries being processed.",
            tag_keys=("deployment", "replica"),
        )
        self.num_processing_items.set_default_tags({
            "deployment": self.deployment_name,
            "replica": self.replica_tag
        })

        self.restart_counter.inc()

        self._shutdown_wait_loop_s = deployment_config.graceful_shutdown_wait_loop_s

        if deployment_config.autoscaling_config:
            process_remote_func = controller_handle.record_autoscaling_metrics.remote
            config = deployment_config.autoscaling_config
            start_metrics_pusher(
                interval_s=config.metrics_interval_s,
                collection_callback=self._collect_autoscaling_metrics,
                metrics_process_func=process_remote_func,
            )

        # NOTE(edoakes): we used to recommend that users use the "ray" logger
        # and tagged the logs with metadata as below. We now recommend using
        # the "ray.serve" 'component logger' (as of Ray 1.13). This is left to
        # maintain backwards compatibility with users who were using the
        # existing logger. We can consider removing it in Ray 2.0.
        ray_logger = logging.getLogger("ray")
        for handler in ray_logger.handlers:
            handler.setFormatter(
                logging.Formatter(
                    handler.formatter._fmt +
                    f" component=serve deployment={self.deployment_name} "
                    f"replica={self.replica_tag}"))
Example #19
registered_members = {}

# Format = {<Post>: {<Student Number>: (<Candidate Object>, <Email>), ...}, ...}
standing = {}

# Format = {<Title>: <Description>, ...}
referenda = {}
referendum_options = [Candidate('For'), Candidate('Against')]

# Format = {<Student Number>: <Preferred Name>, ...}
preferred_names = {}

# Format = {<User ID>: [(<Candidate Student ID>, <Message ID>), ...], ...}
voting_messages = {}

current_live_post_lock = aiorwlock.RWLock()
votes_lock = aiorwlock.RWLock()
voted_lock = asyncio.Lock()

def get_members():
    members = society_members.get_members()
    members[0] = 'RON (Re-Open-Nominations)'

    # Substitute preferred names
    for id in preferred_names:
        members[id] = preferred_names[id]

    return members


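The module-level locks above guard this voting state. A hedged sketch (record_vote is hypothetical, not part of the original module) of how a mutation might take the writer side of votes_lock:

async def record_vote(user_id, candidate_id, message_id):
    # Hypothetical: mutate voting_messages only under the writer lock so
    # readers holding votes_lock.reader_lock see a consistent snapshot.
    async with votes_lock.writer_lock:
        voting_messages.setdefault(user_id, []).append(
            (candidate_id, message_id))
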
def save_voters():
Example #20
    def new(cls, path: pathlib.Path):
        self = cls()
        self._rw_lock = aiorwlock.RWLock()
        self._db = tinydb.TinyDB(path=path.joinpath("metadata.json"),
                                 default_table="metadata")
        return self
Example #21
    def __init__(self, p):
        self.rwlock = aiorwlock.RWLock()
        self.inner = p
Example #22
    async def write_locked(self):
        async with self._rw_lock.writer_lock:
            db = FsModelsMetadata()
            db._db = self._db
            db._rw_lock = aiorwlock.RWLock(fast=True)
            yield db
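For write_locked to be usable as async with obj.write_locked() as db:, the method is presumably decorated with contextlib.asynccontextmanager in the surrounding source. A self-contained sketch under that assumption (the Metadata class and its fields are illustrative, not the original):

import aiorwlock
from contextlib import asynccontextmanager

class Metadata:
    def __init__(self):
        self._rw_lock = aiorwlock.RWLock()
        self._db = {}

    @asynccontextmanager
    async def write_locked(self):
        # The writer lock is held for the whole with-block body.
        async with self._rw_lock.writer_lock:
            yield self._db

# usage: async with meta.write_locked() as db: db["key"] = "value"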
Example #23
    def __init__(
        self,
        _callable: Callable,
        deployment_name: str,
        replica_tag: ReplicaTag,
        deployment_config: DeploymentConfig,
        user_config: Any,
        version: DeploymentVersion,
        is_function: bool,
        controller_handle: ActorHandle,
    ) -> None:
        self.deployment_config = deployment_config
        self.deployment_name = deployment_name
        self.replica_tag = replica_tag
        self.callable = _callable
        self.is_function = is_function
        self.user_config = user_config
        self.version = version
        self.rwlock = aiorwlock.RWLock()

        user_health_check = getattr(_callable, HEALTH_CHECK_METHOD, None)
        if not callable(user_health_check):

            def user_health_check():
                pass

        self.user_health_check = sync_to_async(user_health_check)

        self.num_ongoing_requests = 0

        self.request_counter = metrics.Counter(
            "serve_deployment_request_counter",
            description=("The number of queries that have been "
                         "processed in this replica."),
            tag_keys=("deployment", "replica"),
        )
        self.request_counter.set_default_tags({
            "deployment": self.deployment_name,
            "replica": self.replica_tag
        })

        self.error_counter = metrics.Counter(
            "serve_deployment_error_counter",
            description=("The number of exceptions that have "
                         "occurred in this replica."),
            tag_keys=("deployment", "replica"),
        )
        self.error_counter.set_default_tags({
            "deployment": self.deployment_name,
            "replica": self.replica_tag
        })

        self.restart_counter = metrics.Counter(
            "serve_deployment_replica_starts",
            description=("The number of times this replica "
                         "has been restarted due to failure."),
            tag_keys=("deployment", "replica"),
        )
        self.restart_counter.set_default_tags({
            "deployment": self.deployment_name,
            "replica": self.replica_tag
        })

        self.processing_latency_tracker = metrics.Histogram(
            "serve_deployment_processing_latency_ms",
            description="The latency for queries to be processed.",
            boundaries=DEFAULT_LATENCY_BUCKET_MS,
            tag_keys=("deployment", "replica"),
        )
        self.processing_latency_tracker.set_default_tags({
            "deployment":
            self.deployment_name,
            "replica":
            self.replica_tag
        })

        self.num_processing_items = metrics.Gauge(
            "serve_replica_processing_queries",
            description="The current number of queries being processed.",
            tag_keys=("deployment", "replica"),
        )
        self.num_processing_items.set_default_tags({
            "deployment": self.deployment_name,
            "replica": self.replica_tag
        })

        self.restart_counter.inc()

        self._shutdown_wait_loop_s = deployment_config.graceful_shutdown_wait_loop_s

        if deployment_config.autoscaling_config:
            config = deployment_config.autoscaling_config
            start_metrics_pusher(
                interval_s=config.metrics_interval_s,
                collection_callback=self._collect_autoscaling_metrics,
                controller_handle=controller_handle,
            )

        ray_logger = logging.getLogger("ray")
        for handler in ray_logger.handlers:
            handler.setFormatter(
                logging.Formatter(
                    handler.formatter._fmt +
                    f" component=serve deployment={self.deployment_name} "
                    f"replica={self.replica_tag}"))
Example #24
    def __init__(self, upstreams=None):
        self.upstreams = dict([(s, dict()) for s in Status])
        if upstreams:
            self.upstreams[Status.WORKING] = upstreams
        self.rwlock = aiorwlock.RWLock()
        self.logger = logging.getLogger(self.__class__.__name__)
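As a hedged addition (working_upstreams is hypothetical, not in the original class), reads of this registry could snapshot it under the reader lock:

    async def working_upstreams(self):
        # Copy under the reader lock so concurrent status updates holding
        # the writer lock cannot interleave with the snapshot.
        async with self.rwlock.reader_lock:
            return dict(self.upstreams[Status.WORKING])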