async def run(self, server):
    gcs_channel = self._dashboard_head.aiogrpc_gcs_channel
    self._gcs_node_info_stub = \
        gcs_service_pb2_grpc.NodeInfoGcsServiceStub(gcs_channel)

    await asyncio.gather(self._update_nodes(),
                         self._update_node_stats(),
                         self._update_log_info(),
                         self._update_error_info())
def register_gcs_client(self, gcs_channel: grpc.aio.Channel):
    self._gcs_actor_info_stub = gcs_service_pb2_grpc.ActorInfoGcsServiceStub(
        gcs_channel)
    self._gcs_pg_info_stub = \
        gcs_service_pb2_grpc.PlacementGroupInfoGcsServiceStub(gcs_channel)
    self._gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(
        gcs_channel)
    self._gcs_worker_info_stub = gcs_service_pb2_grpc.WorkerInfoGcsServiceStub(
        gcs_channel)
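# A minimal usage sketch (not part of the module above) showing how an aio
# NodeInfoGcsServiceStub like the one registered in register_gcs_client can be
# queried. The GCS address argument and the 2-second timeout are illustrative
# assumptions; the request/reply message names follow gcs_service.proto.
import asyncio

from grpc.experimental import aio as aiogrpc
from ray.core.generated import gcs_service_pb2, gcs_service_pb2_grpc


async def fetch_all_nodes(gcs_address: str):
    channel = aiogrpc.insecure_channel(gcs_address)
    stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(channel)
    reply = await stub.GetAllNodeInfo(
        gcs_service_pb2.GetAllNodeInfoRequest(), timeout=2)
    # Each entry is a GcsNodeInfo message describing one node in the cluster.
    return list(reply.node_info_list)

# Example (hypothetical address): asyncio.run(fetch_all_nodes("127.0.0.1:6379"))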
def _connect(self):
    self._channel.connect()
    self._kv_stub = gcs_service_pb2_grpc.InternalKVGcsServiceStub(
        self._channel.channel())
    self._runtime_env_stub = gcs_service_pb2_grpc.RuntimeEnvGcsServiceStub(
        self._channel.channel())
    self._node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(
        self._channel.channel())
    self._job_info_stub = gcs_service_pb2_grpc.JobInfoGcsServiceStub(
        self._channel.channel())
async def run(self):
    # Create an aioredis client for all modules.
    self.aioredis_client = await aioredis.create_redis_pool(
        address=self.redis_address, password=self.redis_password)

    # Wait until the GCS is ready.
    while True:
        try:
            gcs_address = await self.aioredis_client.get(
                dashboard_consts.REDIS_KEY_GCS_SERVER_ADDRESS)
            if not gcs_address:
                raise Exception("GCS address not found.")
            logger.info("Connect to GCS at %s", gcs_address)
            channel = aiogrpc.insecure_channel(gcs_address)
        except Exception as ex:
            logger.error("Connect to GCS failed: %s, retry...", ex)
            await asyncio.sleep(
                dashboard_consts.CONNECT_GCS_INTERVAL_SECONDS)
        else:
            self.aiogrpc_gcs_channel = channel
            break

    # Create a NodeInfoGcsServiceStub.
    self._gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(
        self.aiogrpc_gcs_channel)

    async def _async_notify():
        """Notify signals from queue."""
        while True:
            co = await dashboard_utils.NotifyQueue.get()
            try:
                await co
            except Exception as e:
                logger.exception(e)

    async def _purge_data():
        """Purge data in datacenter."""
        while True:
            await asyncio.sleep(
                dashboard_consts.PURGE_DATA_INTERVAL_SECONDS)
            try:
                await DataOrganizer.purge()
            except Exception as e:
                logger.exception(e)

    modules = self._load_modules()
    # Freeze signals after all modules are loaded.
    dashboard_utils.SignalManager.freeze()
    await asyncio.gather(self._update_nodes(), _async_notify(), _purge_data(),
                         *(m.run() for m in modules))
def __init__(self,
             redis_address,
             autoscaling_config,
             redis_password=None,
             prefix_cluster_info=False,
             monitor_ip=None,
             stop_event: Optional[Event] = None):
    # Initialize the Redis clients.
    ray.state.state._initialize_global_state(
        redis_address, redis_password=redis_password)
    self.redis = ray._private.services.create_redis_client(
        redis_address, password=redis_password)
    if monitor_ip:
        self.redis.set("AutoscalerMetricsAddress",
                       f"{monitor_ip}:{AUTOSCALER_METRIC_PORT}")
    (ip, port) = redis_address.split(":")
    # Initialize the gcs stub for getting all node resource usage.
    gcs_address = self.redis.get("GcsServerAddress").decode("utf-8")
    options = (("grpc.enable_http_proxy", 0), )
    gcs_channel = ray._private.utils.init_grpc_channel(gcs_address, options)
    # TODO: Use gcs client for this
    self.gcs_node_resources_stub = \
        gcs_service_pb2_grpc.NodeResourceInfoGcsServiceStub(gcs_channel)
    self.gcs_node_info_stub = \
        gcs_service_pb2_grpc.NodeInfoGcsServiceStub(gcs_channel)

    # Set the redis client and mode so _internal_kv works for autoscaler.
    worker = ray.worker.global_worker
    worker.redis_client = self.redis
    gcs_client = GcsClient.create_from_redis(self.redis)
    _initialize_internal_kv(gcs_client)
    worker.mode = 0

    head_node_ip = redis_address.split(":")[0]
    self.redis_address = redis_address
    self.redis_password = redis_password
    if os.environ.get("RAY_FAKE_CLUSTER"):
        self.load_metrics = LoadMetrics(local_ip=FAKE_HEAD_NODE_ID)
    else:
        self.load_metrics = LoadMetrics(local_ip=head_node_ip)
    self.last_avail_resources = None
    self.event_summarizer = EventSummarizer()
    self.prefix_cluster_info = prefix_cluster_info
    # Can be used to signal graceful exit from monitor loop.
    self.stop_event = stop_event  # type: Optional[Event]
    self.autoscaling_config = autoscaling_config
    self.autoscaler = None
    # If set, we are in a manually created cluster (non-autoscaling) and
    # simply mirroring what the GCS tells us the cluster node types are.
    self.readonly_config = None

    self.prom_metrics = AutoscalerPrometheusMetrics()
    if monitor_ip and prometheus_client:
        # If monitor_ip wasn't passed in, then don't attempt to start the
        # metric server to keep behavior identical to before metrics were
        # introduced.
        try:
            logger.info(
                "Starting autoscaler metrics server on port {}".format(
                    AUTOSCALER_METRIC_PORT))
            prometheus_client.start_http_server(
                port=AUTOSCALER_METRIC_PORT,
                addr="127.0.0.1" if head_node_ip == "127.0.0.1" else "",
                registry=self.prom_metrics.registry)
        except Exception:
            logger.exception(
                "An exception occurred while starting the metrics server.")
    elif not prometheus_client:
        logger.warning("`prometheus_client` not found, so metrics will "
                       "not be exported.")

    logger.info("Monitor: Started")
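# A minimal sketch (illustrative, not part of Monitor) of what the
# "_internal_kv works for autoscaler" setup above enables: once
# _initialize_internal_kv(gcs_client) has run in this process, the
# module-level helpers in ray.experimental.internal_kv read and write the GCS
# key-value store directly. The key and value below are arbitrary examples.
from ray.experimental.internal_kv import _internal_kv_get, _internal_kv_put

_internal_kv_put(b"autoscaler_example_key", b"example_value", overwrite=True)
assert _internal_kv_get(b"autoscaler_example_key") == b"example_value"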
async def run(self):
    # Create an aioredis client for all modules.
    try:
        self.aioredis_client = await dashboard_utils.get_aioredis_client(
            self.redis_address, self.redis_password,
            dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
            dashboard_consts.RETRY_REDIS_CONNECTION_TIMES)
    except (socket.gaierror, ConnectionError):
        logger.error(
            "Dashboard head exiting: "
            "Failed to connect to redis at %s", self.redis_address)
        sys.exit(-1)

    # Create an http session for all modules.
    self.http_session = aiohttp.ClientSession(
        loop=asyncio.get_event_loop())

    # Wait until the GCS is ready.
    while True:
        try:
            gcs_address = await self.aioredis_client.get(
                dashboard_consts.REDIS_KEY_GCS_SERVER_ADDRESS)
            if not gcs_address:
                raise Exception("GCS address not found.")
            logger.info("Connect to GCS at %s", gcs_address)
            options = (("grpc.enable_http_proxy", 0), )
            channel = aiogrpc.insecure_channel(gcs_address, options=options)
        except Exception as ex:
            logger.error("Connect to GCS failed: %s, retry...", ex)
            await asyncio.sleep(
                dashboard_consts.CONNECT_GCS_INTERVAL_SECONDS)
        else:
            self.aiogrpc_gcs_channel = channel
            break

    # Create a NodeInfoGcsServiceStub.
    self._gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(
        self.aiogrpc_gcs_channel)

    # Start a grpc asyncio server.
    await self.server.start()

    async def _async_notify():
        """Notify signals from queue."""
        while True:
            co = await dashboard_utils.NotifyQueue.get()
            try:
                await co
            except Exception:
                logger.exception(f"Error notifying coroutine {co}")

    modules = self._load_modules()

    # The http server should be initialized after all modules are loaded.
    app = aiohttp.web.Application()
    app.add_routes(routes=routes.bound_routes())

    runner = aiohttp.web.AppRunner(app)
    await runner.setup()
    last_ex = None
    for i in range(1 + self.http_port_retries):
        try:
            site = aiohttp.web.TCPSite(runner, self.http_host,
                                       self.http_port)
            await site.start()
            break
        except OSError as e:
            last_ex = e
            self.http_port += 1
            logger.warning("Try to use port %s: %s", self.http_port, e)
    else:
        raise Exception(f"Failed to find a valid port for dashboard after "
                        f"{self.http_port_retries} retries: {last_ex}")
    http_host, http_port, *_ = site._server.sockets[0].getsockname()
    http_host = self.ip if ipaddress.ip_address(
        http_host).is_unspecified else http_host
    logger.info("Dashboard head http address: %s:%s", http_host, http_port)

    # Write the dashboard head port to redis.
    await self.aioredis_client.set(ray_constants.REDIS_KEY_DASHBOARD,
                                   f"{http_host}:{http_port}")
    await self.aioredis_client.set(
        dashboard_consts.REDIS_KEY_DASHBOARD_RPC,
        f"{self.ip}:{self.grpc_port}")

    # Dump registered http routes.
    dump_routes = [
        r for r in app.router.routes() if r.method != hdrs.METH_HEAD
    ]
    for r in dump_routes:
        logger.info(r)
    logger.info("Registered %s routes.", len(dump_routes))

    # Freeze signals after all modules are loaded.
    dashboard_utils.SignalManager.freeze()
    concurrent_tasks = [
        self._update_nodes(),
        _async_notify(),
        DataOrganizer.purge(),
        DataOrganizer.organize(),
    ]
    await asyncio.gather(*concurrent_tasks,
                         *(m.run(self.server) for m in modules))
    await self.server.wait_for_termination()
def __init__(
        self,
        address: str,
        autoscaling_config: Union[str, Callable[[], Dict[str, Any]]],
        redis_password: Optional[str] = None,
        prefix_cluster_info: bool = False,
        monitor_ip: Optional[str] = None,
        stop_event: Optional[Event] = None,
        retry_on_failure: bool = True,
):
    gcs_address = address
    options = (("grpc.enable_http_proxy", 0), )
    gcs_channel = ray._private.utils.init_grpc_channel(gcs_address, options)
    # TODO: Use gcs client for this
    self.gcs_node_resources_stub = (
        gcs_service_pb2_grpc.NodeResourceInfoGcsServiceStub(gcs_channel))
    self.gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(
        gcs_channel)
    if redis_password is not None:
        logger.warning("redis_password has been deprecated.")

    # Set the redis client and mode so _internal_kv works for autoscaler.
    worker = ray.worker.global_worker
    gcs_client = GcsClient(address=gcs_address)

    if monitor_ip:
        monitor_addr = f"{monitor_ip}:{AUTOSCALER_METRIC_PORT}"
        gcs_client.internal_kv_put(b"AutoscalerMetricsAddress",
                                   monitor_addr.encode(), True, None)
    _initialize_internal_kv(gcs_client)
    if monitor_ip:
        monitor_addr = f"{monitor_ip}:{AUTOSCALER_METRIC_PORT}"
        gcs_client.internal_kv_put(b"AutoscalerMetricsAddress",
                                   monitor_addr.encode(), True, None)
    worker.mode = 0

    head_node_ip = gcs_address.split(":")[0]
    self.load_metrics = LoadMetrics()
    self.last_avail_resources = None
    self.event_summarizer = EventSummarizer()
    self.prefix_cluster_info = prefix_cluster_info
    # Can be used to signal graceful exit from monitor loop.
    self.stop_event = stop_event  # type: Optional[Event]
    self.retry_on_failure = retry_on_failure
    self.autoscaling_config = autoscaling_config
    self.autoscaler = None
    # If set, we are in a manually created cluster (non-autoscaling) and
    # simply mirroring what the GCS tells us the cluster node types are.
    self.readonly_config = None

    self.prom_metrics = AutoscalerPrometheusMetrics()
    if monitor_ip and prometheus_client:
        # If monitor_ip wasn't passed in, then don't attempt to start the
        # metric server to keep behavior identical to before metrics were
        # introduced.
        try:
            logger.info(
                "Starting autoscaler metrics server on port {}".format(
                    AUTOSCALER_METRIC_PORT))
            prometheus_client.start_http_server(
                port=AUTOSCALER_METRIC_PORT,
                addr="127.0.0.1" if head_node_ip == "127.0.0.1" else "",
                registry=self.prom_metrics.registry,
            )
        except Exception:
            logger.exception(
                "An exception occurred while starting the metrics server.")
    elif not prometheus_client:
        logger.warning(
            "`prometheus_client` not found, so metrics will not be exported."
        )

    logger.info("Monitor: Started")
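# A minimal sketch (assumed, not the actual Monitor code path) of how the
# gcs_node_resources_stub created above can be polled for cluster-wide load.
# GetAllResourceUsageRequest/Reply and the resource_usage_data.batch fields are
# taken from gcs_service.proto for this Ray version and should be treated as an
# assumption; the 60-second timeout is an arbitrary choice.
from ray.core.generated import gcs_service_pb2


def poll_resource_usage(gcs_node_resources_stub):
    request = gcs_service_pb2.GetAllResourceUsageRequest()
    reply = gcs_node_resources_stub.GetAllResourceUsage(request, timeout=60)
    # One ResourcesData entry per node in the reported batch.
    return list(reply.resource_usage_data.batch)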
async def run(self):
    # Create an aioredis client for all modules.
    try:
        self.aioredis_client = await dashboard_utils.get_aioredis_client(
            self.redis_address, self.redis_password,
            dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
            dashboard_consts.RETRY_REDIS_CONNECTION_TIMES)
    except (socket.gaierror, ConnectionError):
        logger.error(
            "Dashboard head exiting: "
            "Failed to connect to redis at %s", self.redis_address)
        sys.exit(-1)

    # Create an http session for all modules.
    self.http_session = aiohttp.ClientSession(
        loop=asyncio.get_event_loop())

    # Wait until the GCS is ready.
    while True:
        try:
            gcs_address = await self.aioredis_client.get(
                dashboard_consts.REDIS_KEY_GCS_SERVER_ADDRESS)
            if not gcs_address:
                raise Exception("GCS address not found.")
            logger.info("Connect to GCS at %s", gcs_address)
            options = (("grpc.enable_http_proxy", 0), )
            channel = aiogrpc.insecure_channel(gcs_address, options=options)
        except Exception as ex:
            logger.error("Connect to GCS failed: %s, retry...", ex)
            await asyncio.sleep(
                dashboard_consts.CONNECT_GCS_INTERVAL_SECONDS)
        else:
            self.aiogrpc_gcs_channel = channel
            break

    # Create a NodeInfoGcsServiceStub.
    self._gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(
        self.aiogrpc_gcs_channel)

    # Start a grpc asyncio server.
    await self.server.start()

    # Write the dashboard head port to redis.
    await self.aioredis_client.set(dashboard_consts.REDIS_KEY_DASHBOARD,
                                   self.ip + ":" + str(self.http_port))
    await self.aioredis_client.set(
        dashboard_consts.REDIS_KEY_DASHBOARD_RPC,
        self.ip + ":" + str(self.grpc_port))

    async def _async_notify():
        """Notify signals from queue."""
        while True:
            co = await dashboard_utils.NotifyQueue.get()
            try:
                await co
            except Exception:
                logger.exception(f"Error notifying coroutine {co}")

    modules = self._load_modules()

    # The http server should be initialized after all modules are loaded.
    app = aiohttp.web.Application()
    app.add_routes(routes=routes.bound_routes())
    web_server = aiohttp.web._run_app(
        app, host=self.http_host, port=self.http_port)

    # Dump registered http routes.
    dump_routes = [
        r for r in app.router.routes() if r.method != hdrs.METH_HEAD
    ]
    for r in dump_routes:
        logger.info(r)
    logger.info("Registered %s routes.", len(dump_routes))

    # Freeze signals after all modules are loaded.
    dashboard_utils.SignalManager.freeze()
    concurrent_tasks = [
        self._update_nodes(),
        _async_notify(),
        DataOrganizer.purge(),
        DataOrganizer.organize(),
        web_server,
    ]
    await asyncio.gather(*concurrent_tasks,
                         *(m.run(self.server) for m in modules))
    await self.server.wait_for_termination()
def test_gcs_drain(ray_start_cluster_head, error_pubsub):
    """
    Prepare the cluster.
    """
    cluster = ray_start_cluster_head
    head_node_id = ray.nodes()[0]["NodeID"]
    NUM_NODES = 2
    for _ in range(NUM_NODES):
        cluster.add_node(num_cpus=1)
    worker_node_ids = []
    for n in ray.nodes():
        if n["NodeID"] != head_node_id:
            worker_node_ids.append(n["NodeID"])
    """
    Warm up the cluster.
    """

    @ray.remote(num_cpus=1)
    class A:
        def ready(self):
            pass

    actors = [A.remote() for _ in range(NUM_NODES)]
    ray.get([actor.ready.remote() for actor in actors])
    """
    Test batch drain.
    """
    # Prepare requests.
    gcs_server_addr = cluster.gcs_address
    options = (("grpc.enable_http_proxy", 0), )
    channel = grpc.insecure_channel(gcs_server_addr, options)
    stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(channel)
    r = gcs_service_pb2.DrainNodeRequest()
    for worker_id in worker_node_ids:
        data = r.drain_node_data.add()
        data.node_id = NodeID.from_hex(worker_id).binary()
    stub.DrainNode(r)

    p = error_pubsub
    # Errors shouldn't be printed to the driver.
    errors = get_error_message(
        p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5)
    assert len(errors) == 0

    # There should be only a head node since we drained worker nodes.
    # NOTE: In the current implementation we kill nodes when draining them.
    # This check should be removed once we implement
    # the proper drain behavior.
    try:
        wait_for_condition(lambda: len(search_raylet(cluster)) == 1)
    except Exception:
        print("More than one raylet is detected.")
        print(search_raylet(cluster))
    """
    Make sure the API is idempotent.
    """
    for _ in range(10):
        stub.DrainNode(r)
    p = error_pubsub
    # Errors shouldn't be printed to the driver.
    errors = get_error_message(
        p, 1, ray_constants.REMOVED_NODE_ERROR, timeout=5)
    assert len(errors) == 0
    """
    Make sure the GCS states are updated properly.
    """
    for n in ray.nodes():
        node_id = n["NodeID"]
        is_alive = n["Alive"]
        if node_id == head_node_id:
            assert is_alive
        if node_id in worker_node_ids:
            assert not is_alive
    """
    Make sure the head node is not dead and is still functional.
    """
    a = A.options(num_cpus=0).remote()
    ray.get(a.ready.remote())