class JobSupervisor:
    """
    Ray actor created by JobManager for each submitted job, responsible for
    setting up the runtime_env, executing the given shell command in a
    subprocess, updating the job status, persisting job logs, and managing
    subprocess group cleanup.

    One job supervisor actor maps to one subprocess, for one job_id.
    The job supervisor actor should fate-share with the subprocess it created.
    """

    SUBPROCESS_POLL_PERIOD_S = 0.1

    def __init__(self, job_id: str, entrypoint: str,
                 user_metadata: Dict[str, str]):
        self._job_id = job_id
        self._status_client = JobStatusStorageClient()
        self._log_client = JobLogStorageClient()
        self._runtime_env = ray.get_runtime_context().runtime_env
        self._entrypoint = entrypoint

        # Default metadata if not passed by the user.
        self._metadata = {
            JOB_ID_METADATA_KEY: job_id,
            JOB_NAME_METADATA_KEY: job_id
        }
        self._metadata.update(user_metadata)

        # Fire-and-forget call from the outer job manager to this actor.
        self._stop_event = asyncio.Event()

    def ready(self):
        """Dummy object ref. A return from this function indicates that the
        job supervisor actor started successfully with the runtime_env
        configured, and is ready to move on to the running state.
        """
        pass

    def _exec_entrypoint(self, logs_path: str) -> subprocess.Popen:
        """
        Runs the entrypoint command as a child process, streaming stderr &
        stdout to the given log file.

        Meanwhile we start a daemon watchdog process and put the driver
        subprocess in the same pgid, so that if the job actor dies, the
        entire process group fate-shares with it.

        Args:
            logs_path: File path on the head node's local disk to store the
                driver command's stdout & stderr.
        Returns:
            child_process: Child process that runs the driver command. Can be
                terminated or killed upon user calling stop().
        """
        with open(logs_path, "w") as logs_file:
            child_process = subprocess.Popen(
                self._entrypoint,
                shell=True,
                start_new_session=True,
                stdout=logs_file,
                stderr=subprocess.STDOUT)
            parent_pid = os.getpid()
            # Create a new pgid with the new subprocess that executes the
            # driver command.
            child_pid = child_process.pid
            child_pgid = os.getpgid(child_pid)
            # Open a new subprocess to kill the child process when the parent
            # process dies. `kill -s 0 parent_pid` will succeed if the parent
            # is alive. If it fails, SIGKILL the child process group and exit.
            subprocess.Popen(
                f"while kill -s 0 {parent_pid}; do sleep 1; done; kill -9 -{child_pgid}",  # noqa: E501
                shell=True,
                # Suppress output
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            return child_process

    async def _polling(self, child_process) -> int:
        try:
            while child_process is not None:
                return_code = child_process.poll()
                if return_code is not None:
                    # Subprocess finished with a return code.
                    return return_code
                else:
                    # Still running; yield control, 0.1s by default.
                    await asyncio.sleep(self.SUBPROCESS_POLL_PERIOD_S)
        except Exception:
            if child_process:
                # TODO (jiaodong): Improve this with SIGTERM then SIGKILL
                child_process.kill()
            return 1

    async def run(
            self,
            # Signal actor used in testing to capture PENDING -> RUNNING cases
            _start_signal_actor: Optional[ActorHandle] = None):
        """
        Stop and start both happen asynchronously, coordinated by an asyncio
        event and a coroutine, respectively.

        1) Sets the job status to RUNNING.
        2) Passes the runtime env and metadata to the subprocess as
            serialized env variables.
        3) Handles the concurrent events of driver execution and stop request.
        """
        cur_status = self._get_status()
        assert cur_status.status == JobStatus.PENDING, (
            "Run should only be called once.")

        if _start_signal_actor:
            # Block in PENDING state until start signal received.
            await _start_signal_actor.wait.remote()

        self._status_client.put_status(self._job_id,
                                       JobStatusInfo(JobStatus.RUNNING))

        try:
            # Set JobConfig for the child process (runtime_env, metadata).
            os.environ[RAY_JOB_CONFIG_JSON_ENV_VAR] = json.dumps({
                "runtime_env": self._runtime_env,
                "metadata": self._metadata,
            })
            ray_redis_address = ray._private.services.find_redis_address_or_die()  # noqa: E501
            os.environ[ray_constants.
                       RAY_ADDRESS_ENVIRONMENT_VARIABLE] = ray_redis_address

            log_path = self._log_client.get_log_file_path(self._job_id)
            child_process = self._exec_entrypoint(log_path)

            polling_task = create_task(self._polling(child_process))
            finished, _ = await asyncio.wait(
                [polling_task, self._stop_event.wait()],
                return_when=FIRST_COMPLETED)

            if self._stop_event.is_set():
                polling_task.cancel()
                # TODO (jiaodong): Improve this with SIGTERM then SIGKILL
                child_process.kill()
                self._status_client.put_status(self._job_id,
                                               JobStatus.STOPPED)
            else:
                # Child process finished execution and no stop event was set
                # at the same time.
                assert len(
                    finished) == 1, "Should have only one coroutine done"
                [child_process_task] = finished
                return_code = child_process_task.result()
                if return_code == 0:
                    self._status_client.put_status(self._job_id,
                                                   JobStatus.SUCCEEDED)
                else:
                    log_tail = self._log_client.tail_logs(self._job_id)
                    if log_tail is not None and log_tail != "":
                        message = ("Job failed due to an application error, "
                                   "last available logs:\n" + log_tail)
                    else:
                        message = None
                    self._status_client.put_status(
                        self._job_id,
                        JobStatusInfo(
                            status=JobStatus.FAILED, message=message))
        except Exception:
            logger.error(
                "Got unexpected exception while trying to execute driver "
                f"command. {traceback.format_exc()}")
        finally:
            # Clean up the actor after tasks are finished.
            ray.actor.exit_actor()

    def _get_status(self) -> Optional[JobStatusInfo]:
        return self._status_client.get_status(self._job_id)

    def stop(self):
        """Set the stop_event and let run() handle the rest in its
        asyncio.wait().
        """
        self._stop_event.set()
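

# --- Illustrative sketch (not part of the original source) ---
# The fate-sharing pattern used in JobSupervisor._exec_entrypoint above can
# be reproduced in isolation: start the child in its own session/process
# group, then spawn a shell watchdog that SIGKILLs the whole group once the
# parent pid disappears. The names `spawn_with_fate_sharing` and `demo.log`
# are hypothetical; this is a minimal POSIX-only sketch, not Ray code.
import os
import subprocess


def spawn_with_fate_sharing(entrypoint: str,
                            log_path: str = "demo.log") -> subprocess.Popen:
    """Run `entrypoint` in a new process group that dies with this process."""
    with open(log_path, "w") as logs_file:
        child = subprocess.Popen(
            entrypoint,
            shell=True,
            start_new_session=True,  # new session => new process group
            stdout=logs_file,
            stderr=subprocess.STDOUT)
    parent_pid = os.getpid()
    child_pgid = os.getpgid(child.pid)
    # `kill -s 0 <pid>` succeeds while <pid> is alive; once the parent exits,
    # the loop falls through and SIGKILLs the child's process group.
    subprocess.Popen(
        f"while kill -s 0 {parent_pid}; do sleep 1; done; kill -9 -{child_pgid}",  # noqa: E501
        shell=True,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL)
    return child


# Example (hypothetical): spawn_with_fate_sharing("sleep 30 && echo done")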
class JobManager:
    """Provide python APIs for job submission and management.

    It does not provide persistence; all info will be lost if the cluster
    goes down.
    """
    JOB_ACTOR_NAME = "_ray_internal_job_actor_{job_id}"

    def __init__(self):
        self._status_client = JobStatusStorageClient()
        self._log_client = JobLogStorageClient()
        self._supervisor_actor_cls = ray.remote(JobSupervisor)

    def _get_actor_for_job(self, job_id: str) -> Optional[ActorHandle]:
        try:
            return ray.get_actor(self.JOB_ACTOR_NAME.format(job_id=job_id))
        except ValueError:  # Ray returns ValueError for nonexistent actor.
            return None

    def _get_current_node_resource_key(self) -> str:
        """Get the Ray resource key for the current node.

        It can be used for actor placement.
        """
        current_node_id = ray.get_runtime_context().node_id.hex()
        for node in ray.nodes():
            if node["NodeID"] == current_node_id:
                # Found the node.
                for key in node["Resources"].keys():
                    if key.startswith("node:"):
                        return key
        else:
            raise ValueError(
                "Cannot find the node dictionary for current node.")

    def _handle_supervisor_startup(self, job_id: str,
                                   result: Optional[Exception]):
        """Handle the result of starting a job supervisor actor.

        If started successfully, result should be None. Otherwise it should
        be an Exception.

        On failure, the job will be marked failed with a relevant error
        message.
        """
        if result is None:
            return
        elif isinstance(result, RuntimeEnvSetupError):
            logger.info(f"Failed to set up runtime_env for job {job_id}.")
            self._status_client.put_status(
                job_id,
                JobStatusInfo(
                    status=JobStatus.FAILED,
                    message=(f"runtime_env setup failed: {result}")))
        elif isinstance(result, Exception):
            logger.error(
                f"Failed to start supervisor for job {job_id}: {result}.")
            self._status_client.put_status(
                job_id,
                JobStatusInfo(
                    status=JobStatus.FAILED,
                    message=f"Error occurred while starting the job: {result}")
            )
        else:
            assert False, "This should not be reached."

    def submit_job(self,
                   *,
                   entrypoint: str,
                   job_id: Optional[str] = None,
                   runtime_env: Optional[Dict[str, Any]] = None,
                   metadata: Optional[Dict[str, str]] = None,
                   _start_signal_actor: Optional[ActorHandle] = None) -> str:
        """
        Job execution happens asynchronously.

        1) Generate a new unique id for this job submission. Each call of
            this method is treated as an independent submission with its own
            new ID, job supervisor actor, and child process.
        2) Create a new detached actor with the same runtime_env as the job
            spec.

        Setting up the runtime_env, the subprocess group, driver command
        execution, subprocess cleanup, and status updates to the GCS are all
        handled by the job supervisor actor.

        Args:
            entrypoint: Driver command to execute in a subprocess shell.
                Represents the entrypoint to start the user application.
            runtime_env: Runtime environment used to execute the driver
                command, which could contain its own ray.init() to configure
                the runtime env at ray cluster, task and actor level.
            metadata: Support passing arbitrary data to the driver command in
                case needed.
            _start_signal_actor: Used in testing only to capture state
                transitions between PENDING -> RUNNING. Regular users
                shouldn't need this.

        Returns:
            job_id: Generated uuid for further job management. Only valid
                within the same ray cluster.
        """
        if job_id is None:
            job_id = generate_job_id()
        elif self._status_client.get_status(job_id) is not None:
            raise RuntimeError(f"Job {job_id} already exists.")

        logger.info(f"Starting job with job_id: {job_id}")
        self._status_client.put_status(job_id, JobStatus.PENDING)

        # Wait for the actor to start up asynchronously so this call always
        # returns immediately and we can catch errors with the actor starting
        # up. We may want to put this in an actor instead in the future.
        try:
            actor = self._supervisor_actor_cls.options(
                lifetime="detached",
                name=self.JOB_ACTOR_NAME.format(job_id=job_id),
                num_cpus=0,
                # Currently we assume JobManager is created by the dashboard
                # server running on the headnode, same for the job supervisor
                # actors scheduled.
                resources={
                    self._get_current_node_resource_key(): 0.001,
                },
                runtime_env=runtime_env).remote(job_id, entrypoint, metadata
                                                or {})
            actor.run.remote(_start_signal_actor=_start_signal_actor)

            def callback(result: Optional[Exception]):
                return self._handle_supervisor_startup(job_id, result)

            actor.ready.remote()._on_completed(callback)
        except Exception as e:
            self._handle_supervisor_startup(job_id, e)

        return job_id

    def stop_job(self, job_id) -> bool:
        """Request the job to exit, fire and forget.

        Args:
            job_id: ID of the job.
        Returns:
            stopped:
                True if there's a running job.
                False if no running job was found.
        """
        job_supervisor_actor = self._get_actor_for_job(job_id)
        if job_supervisor_actor is not None:
            # Actor is still alive, signal it to stop the driver, fire and
            # forget.
            job_supervisor_actor.stop.remote()
            return True
        else:
            return False

    def get_job_status(self, job_id: str) -> JobStatusInfo:
        """Get the latest status of a job.

        If the job supervisor actor is no longer alive, this will also make
        the adjustments needed to bring the job to the correct termination
        state. All job status is stored in, and read from, the GCS.

        Args:
            job_id: ID of the job.
        Returns:
            job_status: Latest known job status.
        """
        job_supervisor_actor = self._get_actor_for_job(job_id)
        if job_supervisor_actor is None:
            # The job actor either exited or failed; ensure we never leave
            # the job in a non-terminal status in case the actor failed
            # without updating the GCS with the latest status.
            last_status = self._status_client.get_status(job_id)
            if last_status and last_status.status in {
                    JobStatus.PENDING, JobStatus.RUNNING
            }:
                self._status_client.put_status(job_id, JobStatus.FAILED)

        return self._status_client.get_status(job_id)

    def get_job_logs(self, job_id: str) -> bytes:
        return self._log_client.get_logs(job_id)
class APIHead(dashboard_utils.DashboardHeadModule):
    def __init__(self, dashboard_head):
        super().__init__(dashboard_head)
        self._gcs_job_info_stub = None
        self._gcs_actor_info_stub = None
        self._dashboard_head = dashboard_head
        assert _internal_kv_initialized()
        self._job_status_client = JobStatusStorageClient()
        # For offloading CPU intensive work.
        self._thread_pool = concurrent.futures.ThreadPoolExecutor(
            max_workers=2, thread_name_prefix="api_head")

    @routes.get("/api/actors/kill")
    async def kill_actor_gcs(self, req) -> aiohttp.web.Response:
        actor_id = req.query.get("actor_id")
        force_kill = req.query.get("force_kill", False) in ("true", "True")
        no_restart = req.query.get("no_restart", False) in ("true", "True")
        if not actor_id:
            return dashboard_optional_utils.rest_response(
                success=False, message="actor_id is required.")

        request = gcs_service_pb2.KillActorViaGcsRequest()
        request.actor_id = bytes.fromhex(actor_id)
        request.force_kill = force_kill
        request.no_restart = no_restart
        await self._gcs_actor_info_stub.KillActorViaGcs(request, timeout=5)

        message = (f"Force killed actor with id {actor_id}" if force_kill else
                   f"Requested actor with id {actor_id} to terminate. " +
                   "It will exit once running tasks complete")

        return dashboard_optional_utils.rest_response(
            success=True, message=message)

    @routes.get("/api/snapshot")
    async def snapshot(self, req):
        job_data, actor_data, serve_data, session_name = await asyncio.gather(
            self.get_job_info(), self.get_actor_info(), self.get_serve_info(),
            self.get_session_name())
        snapshot = {
            "jobs": job_data,
            "actors": actor_data,
            "deployments": serve_data,
            "session_name": session_name,
            "ray_version": ray.__version__,
            "ray_commit": ray.__commit__
        }
        return dashboard_optional_utils.rest_response(
            success=True, message="hello", snapshot=snapshot)

    def _get_job_status(self,
                        metadata: Dict[str, str]) -> Optional[JobStatusInfo]:
        # If a job submission ID has been added to a job, the status is
        # guaranteed to be returned.
        job_submission_id = metadata.get(JOB_ID_METADATA_KEY)
        return self._job_status_client.get_status(job_submission_id)

    async def get_job_info(self):
        request = gcs_service_pb2.GetAllJobInfoRequest()
        reply = await self._gcs_job_info_stub.GetAllJobInfo(request, timeout=5)

        jobs = {}
        for job_table_entry in reply.job_info_list:
            job_id = job_table_entry.job_id.hex()
            metadata = dict(job_table_entry.config.metadata)
            config = {
                "namespace": job_table_entry.config.ray_namespace,
                "metadata": metadata,
                "runtime_env": ParsedRuntimeEnv.deserialize(
                    job_table_entry.config.runtime_env_info.
                    serialized_runtime_env),
            }
            status = self._get_job_status(metadata)
            entry = {
                "status": None if status is None else status.status,
                "status_message": None if status is None else status.message,
                "is_dead": job_table_entry.is_dead,
                "start_time": job_table_entry.start_time,
                "end_time": job_table_entry.end_time,
                "config": config,
            }
            jobs[job_id] = entry

        return jobs

    async def get_actor_info(self):
        # TODO (Alex): GCS still needs to return actors from dead jobs.
        request = gcs_service_pb2.GetAllActorInfoRequest()
        request.show_dead_jobs = True
        reply = await self._gcs_actor_info_stub.GetAllActorInfo(
            request, timeout=5)
        actors = {}
        for actor_table_entry in reply.actor_table_data:
            actor_id = actor_table_entry.actor_id.hex()
            runtime_env = json.loads(actor_table_entry.serialized_runtime_env)
            entry = {
                "job_id": actor_table_entry.job_id.hex(),
                "state": gcs_pb2.ActorTableData.ActorState.Name(
                    actor_table_entry.state),
                "name": actor_table_entry.name,
                "namespace": actor_table_entry.ray_namespace,
                "runtime_env": runtime_env,
                "start_time": actor_table_entry.start_time,
                "end_time": actor_table_entry.end_time,
                "is_detached": actor_table_entry.is_detached,
                "resources": dict(
                    actor_table_entry.task_spec.required_resources),
                "actor_class": actor_table_entry.class_name,
                "current_worker_id": actor_table_entry.address.worker_id.hex(),
                "current_raylet_id": actor_table_entry.address.raylet_id.hex(),
                "ip_address": actor_table_entry.address.ip_address,
                "port": actor_table_entry.address.port,
                "metadata": dict()
            }
            actors[actor_id] = entry

        deployments = await self.get_serve_info()
        for _, deployment_info in deployments.items():
            for replica_actor_id, actor_info in deployment_info[
                    "actors"].items():
                if replica_actor_id in actors:
                    serve_metadata = dict()
                    serve_metadata["replica_tag"] = actor_info["replica_tag"]
                    serve_metadata["deployment_name"] = deployment_info["name"]
                    serve_metadata["version"] = actor_info["version"]
                    actors[replica_actor_id]["metadata"][
                        "serve"] = serve_metadata
        return actors

    async def get_serve_info(self) -> Dict[str, Any]:
        # Conditionally import serve to prevent ModuleNotFoundError from serve
        # dependencies when only ray[default] is installed (#17712)
        try:
            from ray.serve.controller import SNAPSHOT_KEY as SERVE_SNAPSHOT_KEY
            from ray.serve.constants import SERVE_CONTROLLER_NAME
        except Exception:
            return {}

        # Serve wraps Ray's internal KV store and specially formats the keys.
        # These are the keys we are interested in:
        # SERVE_CONTROLLER_NAME(+ optional random letters):SERVE_SNAPSHOT_KEY
        # TODO: Convert to async GRPC, if CPU usage is not a concern.
        def get_deployments():
            serve_keys = _internal_kv_list(
                SERVE_CONTROLLER_NAME,
                namespace=ray_constants.KV_NAMESPACE_SERVE)
            serve_snapshot_keys = filter(
                lambda k: SERVE_SNAPSHOT_KEY in str(k), serve_keys)

            deployments_per_controller: List[Dict[str, Any]] = []
            for key in serve_snapshot_keys:
                val_bytes = _internal_kv_get(
                    key, namespace=ray_constants.KV_NAMESPACE_SERVE
                ) or "{}".encode("utf-8")
                deployments_per_controller.append(
                    json.loads(val_bytes.decode("utf-8")))
            # Merge the deployments dicts of all controllers.
            deployments: Dict[str, Any] = {
                k: v
                for d in deployments_per_controller for k, v in d.items()
            }
            # Replace the keys (deployment names) with their hashes to prevent
            # collisions caused by the automatic conversion to camelcase by
            # the dashboard agent.
            return {
                hashlib.sha1(name.encode()).hexdigest(): info
                for name, info in deployments.items()
            }

        return await asyncio.get_event_loop().run_in_executor(
            executor=self._thread_pool, func=get_deployments)

    async def get_session_name(self):
        # TODO(yic): Convert to async GRPC.
        def get_session():
            return ray.experimental.internal_kv._internal_kv_get(
                "session_name",
                namespace=ray_constants.KV_NAMESPACE_SESSION).decode()

        return await asyncio.get_event_loop().run_in_executor(
            executor=self._thread_pool, func=get_session)

    async def run(self, server):
        self._gcs_job_info_stub = gcs_service_pb2_grpc.JobInfoGcsServiceStub(
            self._dashboard_head.aiogrpc_gcs_channel)
        self._gcs_actor_info_stub = \
            gcs_service_pb2_grpc.ActorInfoGcsServiceStub(
                self._dashboard_head.aiogrpc_gcs_channel)

    @staticmethod
    def is_minimal_module():
        return False
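

# --- Illustrative sketch (not part of the original source) ---
# The routes registered by APIHead above can be exercised with any HTTP
# client against the dashboard address (http://127.0.0.1:8265 by default;
# adjust for your cluster). `fetch_snapshot` is a hypothetical helper, and
# the exact envelope produced by rest_response() should be inspected rather
# than assumed.
import requests


def fetch_snapshot(address: str = "http://127.0.0.1:8265") -> dict:
    """Fetch the cluster snapshot exposed by the /api/snapshot route."""
    resp = requests.get(f"{address}/api/snapshot", timeout=10)
    resp.raise_for_status()
    return resp.json()


# Example (hypothetical): print(fetch_snapshot()) -- print the whole payload
# first to see how jobs, actors and deployments are nested.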
class JobManager:
    """Provide python APIs for job submission and management.

    It does not provide persistence; all info will be lost if the cluster
    goes down.
    """

    JOB_ACTOR_NAME = "_ray_internal_job_actor_{job_id}"
    # Time that we will sleep while tailing logs if no new log line is
    # available.
    LOG_TAIL_SLEEP_S = 1
    JOB_MONITOR_LOOP_PERIOD_S = 1

    def __init__(self):
        self._status_client = JobStatusStorageClient()
        self._log_client = JobLogStorageClient()
        self._supervisor_actor_cls = ray.remote(JobSupervisor)

        self._recover_running_jobs()

    def _recover_running_jobs(self):
        """Recovers all running jobs from the status client.

        For each job, we will spawn a coroutine to monitor it.
        Each will be added to self._running_jobs and reconciled.
        """
        all_jobs = self._status_client.get_all_jobs()
        for job_id, status_info in all_jobs.items():
            if not status_info.status.is_terminal():
                create_task(self._monitor_job(job_id))

    def _get_actor_for_job(self, job_id: str) -> Optional[ActorHandle]:
        try:
            return ray.get_actor(self.JOB_ACTOR_NAME.format(job_id=job_id))
        except ValueError:  # Ray returns ValueError for nonexistent actor.
            return None

    async def _monitor_job(
        self, job_id: str, job_supervisor: Optional[ActorHandle] = None
    ):
        """Monitors the specified job until it enters a terminal state.

        This is necessary because we need to handle the case where the
        JobSupervisor dies unexpectedly.
        """
        is_alive = True
        if job_supervisor is None:
            job_supervisor = self._get_actor_for_job(job_id)

            if job_supervisor is None:
                logger.error(f"Failed to get job supervisor for job {job_id}.")
                self._status_client.put_status(
                    job_id,
                    JobStatusInfo(
                        status=JobStatus.FAILED,
                        message=(
                            "Unexpected error occurred: "
                            "Failed to get job supervisor."
                        ),
                    ),
                )
                is_alive = False

        while is_alive:
            try:
                await job_supervisor.ping.remote()
                await asyncio.sleep(self.JOB_MONITOR_LOOP_PERIOD_S)
            except Exception as e:
                is_alive = False
                if self._status_client.get_status(job_id).status.is_terminal():
                    # If the job is already in a terminal state, then the
                    # actor exiting is expected.
                    pass
                elif isinstance(e, RuntimeEnvSetupError):
                    logger.info(
                        f"Failed to set up runtime_env for job {job_id}.")
                    self._status_client.put_status(
                        job_id,
                        JobStatusInfo(
                            status=JobStatus.FAILED,
                            message=(f"runtime_env setup failed: {e}"),
                        ),
                    )
                else:
                    logger.warning(
                        f"Job supervisor for job {job_id} failed "
                        f"unexpectedly: {e}."
                    )
                    self._status_client.put_status(
                        job_id,
                        JobStatusInfo(
                            status=JobStatus.FAILED,
                            message=f"Unexpected error occurred: {e}",
                        ),
                    )

                # Kill the actor defensively to avoid leaking actors in
                # unexpected error cases.
                if job_supervisor is not None:
                    ray.kill(job_supervisor, no_restart=True)

    def _get_current_node_resource_key(self) -> str:
        """Get the Ray resource key for the current node.

        It can be used for actor placement.
        """
        current_node_id = ray.get_runtime_context().node_id.hex()
        for node in ray.nodes():
            if node["NodeID"] == current_node_id:
                # Found the node.
                for key in node["Resources"].keys():
                    if key.startswith("node:"):
                        return key
        else:
            raise ValueError(
                "Cannot find the node dictionary for current node.")

    def _handle_supervisor_startup(
        self, job_id: str, result: Optional[Exception]
    ):
        """Handle the result of starting a job supervisor actor.

        If started successfully, result should be None. Otherwise it should
        be an Exception.

        On failure, the job will be marked failed with a relevant error
        message.
        """
        if result is None:
            return

    def submit_job(
        self,
        *,
        entrypoint: str,
        job_id: Optional[str] = None,
        runtime_env: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, str]] = None,
        _start_signal_actor: Optional[ActorHandle] = None,
    ) -> str:
        """
        Job execution happens asynchronously.

        1) Generate a new unique id for this job submission. Each call of
            this method is treated as an independent submission with its own
            new ID, job supervisor actor, and child process.
        2) Create a new detached actor with the same runtime_env as the job
            spec.

        Setting up the runtime_env, the subprocess group, driver command
        execution, subprocess cleanup, and status updates to the GCS are all
        handled by the job supervisor actor.

        Args:
            entrypoint: Driver command to execute in a subprocess shell.
                Represents the entrypoint to start the user application.
            runtime_env: Runtime environment used to execute the driver
                command, which could contain its own ray.init() to configure
                the runtime env at ray cluster, task and actor level.
            metadata: Support passing arbitrary data to the driver command in
                case needed.
            _start_signal_actor: Used in testing only to capture state
                transitions between PENDING -> RUNNING. Regular users
                shouldn't need this.

        Returns:
            job_id: Generated uuid for further job management. Only valid
                within the same ray cluster.
        """
        if job_id is None:
            job_id = generate_job_id()
        elif self._status_client.get_status(job_id) is not None:
            raise RuntimeError(f"Job {job_id} already exists.")

        logger.info(f"Starting job with job_id: {job_id}")
        self._status_client.put_status(job_id, JobStatus.PENDING)

        # Wait for the actor to start up asynchronously so this call always
        # returns immediately and we can catch errors with the actor starting
        # up.
        try:
            supervisor = self._supervisor_actor_cls.options(
                lifetime="detached",
                name=self.JOB_ACTOR_NAME.format(job_id=job_id),
                num_cpus=0,
                # Currently we assume JobManager is created by the dashboard
                # server running on the headnode, same for the job supervisor
                # actors scheduled.
                resources={
                    self._get_current_node_resource_key(): 0.001,
                },
                runtime_env=runtime_env,
            ).remote(job_id, entrypoint, metadata or {})
            supervisor.run.remote(_start_signal_actor=_start_signal_actor)

            # Monitor the job in the background so we can detect errors
            # without requiring a client to poll.
            create_task(self._monitor_job(job_id, job_supervisor=supervisor))
        except Exception as e:
            self._status_client.put_status(
                job_id,
                JobStatusInfo(
                    status=JobStatus.FAILED,
                    message=f"Failed to start job supervisor: {e}.",
                ),
            )

        return job_id

    def stop_job(self, job_id) -> bool:
        """Request a job to exit, fire and forget.

        Returns whether or not the job was running.
        """
        job_supervisor_actor = self._get_actor_for_job(job_id)
        if job_supervisor_actor is not None:
            # Actor is still alive, signal it to stop the driver, fire and
            # forget.
            job_supervisor_actor.stop.remote()
            return True
        else:
            return False

    def get_job_status(self, job_id: str) -> Optional[JobStatus]:
        """Get the latest status of a job."""
        return self._status_client.get_status(job_id)

    def get_job_logs(self, job_id: str) -> str:
        """Get all logs produced by a job."""
        return self._log_client.get_logs(job_id)

    async def tail_job_logs(self, job_id: str) -> Iterator[str]:
        """Return an iterator following the logs of a job."""
        if self.get_job_status(job_id) is None:
            raise RuntimeError(f"Job '{job_id}' does not exist.")

        for line in self._log_client.tail_logs(job_id):
            if line is None:
                # Return if the job has exited and there are no new log lines.
                status = self.get_job_status(job_id)
                if status.status not in {JobStatus.PENDING, JobStatus.RUNNING}:
                    return

                await asyncio.sleep(self.LOG_TAIL_SLEEP_S)
            else:
                yield line
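

# --- Illustrative sketch (not part of the original source) ---
# tail_job_logs() above is an async generator, so it is consumed with
# `async for`. This hypothetical driver assumes ray.init() has already been
# called and that an event loop is running (JobManager.__init__ schedules
# monitoring tasks via create_task, which needs a running loop). The
# entrypoint string is made up for illustration.
import asyncio


async def follow_demo_job_logs() -> None:
    manager = JobManager()
    job_id = manager.submit_job(
        entrypoint="for i in 1 2 3; do echo line $i; sleep 1; done")
    async for line in manager.tail_job_logs(job_id):
        print(line)


# Example (hypothetical):
#     ray.init()
#     asyncio.get_event_loop().run_until_complete(follow_demo_job_logs())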