def test_serialization(self, is_task_or_actor):
    """Serialized runtime envs must be insensitive to dict key ordering."""
    first = ParsedRuntimeEnv(
        {
            "pip": ["requests"],
            "env_vars": {
                "hi1": "hi1",
                "hi2": "hi2"
            }
        },
        is_task_or_actor=is_task_or_actor)
    second = ParsedRuntimeEnv(
        {
            "env_vars": {
                "hi2": "hi2",
                "hi1": "hi1"
            },
            "pip": ["requests"]
        },
        is_task_or_actor=is_task_or_actor)
    assert first == second

    serialized_first = first.serialize()
    serialized_second = second.serialize()
    # Key ordering shouldn't matter.
    assert serialized_first == serialized_second

    # Round-tripping through serialize/deserialize preserves equality.
    restored_first = ParsedRuntimeEnv.deserialize(serialized_first)
    restored_second = ParsedRuntimeEnv.deserialize(serialized_second)
    assert first == restored_first == second == restored_second
async def get_job_info(self):
    """Return info for each job. Here a job is a Ray driver."""
    reply = await self._gcs_job_info_stub.GetAllJobInfo(
        gcs_service_pb2.GetAllJobInfoRequest(), timeout=5)

    jobs = {}
    for entry_pb in reply.job_info_list:
        metadata = dict(entry_pb.config.metadata)
        # NOTE(review): data may be None when no job data exists for
        # this metadata — the status fields below handle that.
        data = self._get_job_data(metadata)
        jobs[entry_pb.job_id.hex()] = {
            "status": data.status if data is not None else None,
            "status_message": data.message if data is not None else None,
            "is_dead": entry_pb.is_dead,
            "start_time": entry_pb.start_time,
            "end_time": entry_pb.end_time,
            "config": {
                "namespace": entry_pb.config.ray_namespace,
                "metadata": metadata,
                "runtime_env": ParsedRuntimeEnv.deserialize(
                    entry_pb.config.runtime_env_info.
                    serialized_runtime_env),
            },
        }
    return jobs
def runtime_env(self):
    """Get the runtime env dict used for the current driver or worker.

    Returns:
        The runtime env dict currently used by this worker.
    """
    serialized = self.get_runtime_env_string()
    return ParsedRuntimeEnv.deserialize(serialized)
def run_setup_with_logger():
    """Set up the runtime env and return the resulting context.

    Closure: reads `self`, `request`, `serialized_runtime_env` and
    `serialized_allocated_resource_instances` from the enclosing scope.
    """
    runtime_env: dict = ParsedRuntimeEnv.deserialize(
        serialized_runtime_env or "{}")
    allocated_resource: dict = json.loads(
        serialized_allocated_resource_instances or "{}")

    # Use a separate logger for each job.
    per_job_logger = self.get_or_create_logger(request.job_id)
    # TODO(chenk008): Add log about allocated_resource to
    # avoid lint error. That will be moved to cgroup plugin.
    per_job_logger.debug(f"Worker has resource :{allocated_resource}")

    context = RuntimeEnvContext(env_vars=runtime_env.get("env_vars"))

    # Run each built-in manager in the same fixed order as before.
    managers = (self._conda_manager, self._py_modules_manager,
                self._working_dir_manager, self._container_manager)
    for manager in managers:
        manager.setup(runtime_env, context, logger=per_job_logger)

    # Add the mapping of URIs -> the serialized environment to be
    # used for cache invalidation.
    for uri in runtime_env.get_uris():
        self._uris_to_envs[uri].add(serialized_runtime_env)

    # Run setup function from all the plugins.
    for plugin_path in runtime_env.get("plugins", {}):
        plugin_cls = import_attr(plugin_path)
        # TODO(simon): implement uri support
        plugin_cls.create("uri not implemented", runtime_env, context)
        plugin_cls.modify_context("uri not implemented", runtime_env,
                                  context)

    return context