Example 1
def test_sync_job_config(shutdown_only):
    num_java_workers_per_process = 8
    runtime_env = {"env_vars": {"key": "value"}}

    ray.init(job_config=ray.job_config.JobConfig(
        num_java_workers_per_process=num_java_workers_per_process,
        runtime_env=runtime_env,
    ))

    # Check that the job config is synchronized at the driver side.
    job_config = ray.worker.global_worker.core_worker.get_job_config()
    assert job_config.num_java_workers_per_process == num_java_workers_per_process
    job_runtime_env = RuntimeEnv.deserialize(
        job_config.runtime_env_info.serialized_runtime_env)
    assert job_runtime_env.env_vars() == runtime_env["env_vars"]

    @ray.remote
    def get_job_config():
        job_config = ray.worker.global_worker.core_worker.get_job_config()
        return job_config.SerializeToString()

    # Check that the job config is synchronized at the worker side.
    job_config = gcs_utils.JobConfig()
    job_config.ParseFromString(ray.get(get_job_config.remote()))
    assert job_config.num_java_workers_per_process == num_java_workers_per_process
    job_runtime_env = RuntimeEnv.deserialize(
        job_config.runtime_env_info.serialized_runtime_env)
    assert job_runtime_env.env_vars() == runtime_env["env_vars"]
Example 2
    def test_serialization(self):
        env1 = RuntimeEnv(pip=["requests"],
                          env_vars={
                              "hi1": "hi1",
                              "hi2": "hi2"
                          })

        env2 = RuntimeEnv(env_vars={
            "hi2": "hi2",
            "hi1": "hi1"
        },
                          pip=["requests"])

        assert env1 == env2

        serialized_env1 = env1.serialize()
        serialized_env2 = env2.serialize()

        # Key ordering shouldn't matter.
        assert serialized_env1 == serialized_env2

        deserialized_env1 = RuntimeEnv.deserialize(serialized_env1)
        deserialized_env2 = RuntimeEnv.deserialize(serialized_env2)

        assert env1 == deserialized_env1 == env2 == deserialized_env2
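Note: a minimal round-trip sketch distilled from the test above (the exact
serialized layout is an implementation detail; only the string type and the
lossless round trip are asserted here):

from ray.runtime_env import RuntimeEnv

env = RuntimeEnv(pip=["requests"], env_vars={"A": "1"})
serialized = env.serialize()
assert isinstance(serialized, str)
assert RuntimeEnv.deserialize(serialized) == env  # lossless round trip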
Example 3
    async def get_job_info(self):
        """Return info for each job.  Here a job is a Ray driver."""
        request = gcs_service_pb2.GetAllJobInfoRequest()
        reply = await self._gcs_job_info_stub.GetAllJobInfo(request, timeout=5)

        jobs = {}
        for job_table_entry in reply.job_info_list:
            job_id = job_table_entry.job_id.hex()
            metadata = dict(job_table_entry.config.metadata)
            config = {
                "namespace": job_table_entry.config.ray_namespace,
                "metadata": metadata,
                "runtime_env": RuntimeEnv.deserialize(
                    job_table_entry.config.runtime_env_info.
                    serialized_runtime_env),
            }
            info = self._get_job_info(metadata)
            entry = {
                "status": None if info is None else info.status,
                "status_message": None if info is None else info.message,
                "is_dead": job_table_entry.is_dead,
                "start_time": job_table_entry.start_time,
                "end_time": job_table_entry.end_time,
                "config": config,
            }
            jobs[job_id] = entry

        return jobs
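Note: the protobuf plumbing above can be sketched with a stand-in object. The
field names mirror the job table entry used in this example; SimpleNamespace
is only a hypothetical substitute for the real GCS message types:

from types import SimpleNamespace

from ray.runtime_env import RuntimeEnv

entry = SimpleNamespace(config=SimpleNamespace(
    ray_namespace="my_namespace",
    metadata={"owner": "alice"},
    runtime_env_info=SimpleNamespace(
        serialized_runtime_env=RuntimeEnv(env_vars={"K": "V"}).serialize()),
))
config = {
    "namespace": entry.config.ray_namespace,
    "metadata": dict(entry.config.metadata),
    "runtime_env": RuntimeEnv.deserialize(
        entry.config.runtime_env_info.serialized_runtime_env),
}
assert config["runtime_env"].env_vars() == {"K": "V"}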
Example 4
    def runtime_env(self):
        """Get the runtime env used for the current driver or worker.

        Returns:
            The runtime env currently used by this worker. The type of
                the return value is ray.runtime_env.RuntimeEnv.
        """

        return RuntimeEnv.deserialize(self.get_runtime_env_string())
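Note: a minimal usage sketch; it assumes the public accessor
ray.get_runtime_context().runtime_env (shown in Example 9) wraps this
property, and that RuntimeEnv behaves like a dict for printing purposes:

import ray

ray.init(runtime_env={"env_vars": {"KEY": "VALUE"}})

@ray.remote
def show_env():
    # Returns a ray.runtime_env.RuntimeEnv for the current worker.
    return dict(ray.get_runtime_context().runtime_env)

print(ray.get(show_env.remote()))  # expected to include the env_vars above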
Example 5
    async def list_runtime_envs(self, *, option: ListApiOptions) -> List[dict]:
        """List all runtime env information from the cluster.

        Returns:
            A list of runtime env information in the cluster.
            The schema of each returned dict is equivalent to the
            `RuntimeEnvState` protobuf message.
            Unlike other APIs, there is no id -> data mapping here because
            runtime envs don't have unique IDs.
        """
        replies = await asyncio.gather(*[
            self._client.get_runtime_envs_info(node_id, timeout=option.timeout)
            for node_id in self._client.get_all_registered_agent_ids()
        ])
        result = []
        for node_id, reply in zip(self._client.get_all_registered_agent_ids(),
                                  replies):
            states = reply.runtime_env_states
            for state in states:
                data = self._message_to_dict(message=state,
                                             fields_to_decode=[])
                # Need to deserialize this field.
                data["runtime_env"] = RuntimeEnv.deserialize(
                    data["runtime_env"]).to_dict()
                data["node_id"] = node_id
                data = filter_fields(data, RuntimeEnvState)
                result.append(data)

        # Sort to make the output deterministic.
        def sort_func(entry):
            # If the creation time is not present yet (the runtime env failed
            # to be created or has not been created yet), the entry gets the
            # highest priority. Otherwise, larger creation times come first.
            if "creation_time_ms" not in entry:
                return float("inf")
            elif entry["creation_time_ms"] is None:
                return float("inf")
            else:
                return float(entry["creation_time_ms"])

        result.sort(key=sort_func, reverse=True)
        return list(islice(result, option.limit))
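Note: to make the ordering concrete, here is the same key function applied to
standalone dicts (illustrative data only):

def sort_func(entry):
    # A missing or None creation time maps to +inf, so with reverse=True
    # those entries come first; otherwise newer (larger) times come first.
    if entry.get("creation_time_ms") is None:
        return float("inf")
    return float(entry["creation_time_ms"])

entries = [{"creation_time_ms": 100}, {}, {"creation_time_ms": 250}]
entries.sort(key=sort_func, reverse=True)
assert entries == [{}, {"creation_time_ms": 250}, {"creation_time_ms": 100}]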
Example 6
def test_convert_from_and_to_dataclass():
    runtime_env = RuntimeEnv()
    test_plugin = TestPlugin(
        field1=[
            ValueType(nfield1=["a", "b", "c"], nfield2=False),
            ValueType(nfield1=["d", "e"], nfield2=True),
        ],
        field2="abc",
    )
    runtime_env.set("test_plugin", test_plugin)
    serialized_runtime_env = runtime_env.serialize()
    assert "test_plugin" in serialized_runtime_env
    runtime_env_2 = RuntimeEnv.deserialize(serialized_runtime_env)
    test_plugin_2 = runtime_env_2.get("test_plugin", data_class=TestPlugin)
    assert len(test_plugin_2.field1) == 2
    assert test_plugin_2.field1[0].nfield1 == ["a", "b", "c"]
    assert test_plugin_2.field1[0].nfield2 is False
    assert test_plugin_2.field1[1].nfield1 == ["d", "e"]
    assert test_plugin_2.field1[1].nfield2 is True
    assert test_plugin_2.field2 == "abc"
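Note: TestPlugin and ValueType are defined elsewhere in the test module. A
plausible shape, inferred from the assertions above (the field names come
from the test; the dataclass definitions themselves are assumptions):

from dataclasses import dataclass
from typing import List

@dataclass
class ValueType:
    nfield1: List[str]
    nfield2: bool

@dataclass
class TestPlugin:
    field1: List[ValueType]
    field2: str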
Example 7
def test_serialize_deserialize(option):
    runtime_env = dict()
    if option == "pip_list":
        runtime_env["pip"] = ["pkg1", "pkg2"]
    elif option == "pip_dict":
        runtime_env["pip"] = {
            "packages": ["pkg1", "pkg2"],
            "pip_check": False,
            "pip_version": "<22,>20",
        }
    elif option == "conda_name":
        runtime_env["conda"] = "env_name"
    elif option == "conda_dict":
        runtime_env["conda"] = {"dependencies": ["dep1", "dep2"]}
    elif option == "container":
        runtime_env["container"] = {
            "image": "anyscale/ray-ml:nightly-py38-cpu",
            "worker_path":
            "/root/python/ray/_private/workers/default_worker.py",
            "run_options": ["--cap-drop SYS_ADMIN", "--log-level=debug"],
        }
    else:
        raise ValueError("unexpected option " + str(option))

    typed_runtime_env = RuntimeEnv(**runtime_env)
    serialized_runtime_env = typed_runtime_env.serialize()
    cls_runtime_env = RuntimeEnv.deserialize(serialized_runtime_env)
    cls_runtime_env_dict = cls_runtime_env.to_dict()

    if "pip" in typed_runtime_env and isinstance(typed_runtime_env["pip"],
                                                 list):
        pip_config_in_cls_runtime_env = cls_runtime_env_dict.pop("pip")
        pip_config_in_runtime_env = typed_runtime_env.pop("pip")
        assert {
            "packages": pip_config_in_runtime_env,
            "pip_check": False,
        } == pip_config_in_cls_runtime_env

    assert cls_runtime_env_dict == typed_runtime_env
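Note: the final branch reflects that a bare pip list appears to be normalized
into a dict during the round trip. A minimal sketch of that behavior, as
implied by the assertion (not an authoritative spec of the pip schema):

from ray.runtime_env import RuntimeEnv

env = RuntimeEnv(pip=["pkg1", "pkg2"])
round_tripped = RuntimeEnv.deserialize(env.serialize()).to_dict()
# Per the assertion above, the list form comes back as:
# {"packages": ["pkg1", "pkg2"], "pip_check": False}
print(round_tripped["pip"])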
Example 8
    async def DeleteRuntimeEnvIfPossible(self, request, context):
        self._logger.info(
            f"Got request from {request.source_process} to decrease "
            "reference for runtime env: "
            f"{request.serialized_runtime_env}.")

        try:
            runtime_env = RuntimeEnv.deserialize(
                request.serialized_runtime_env)
        except Exception as e:
            self._logger.exception("[Decrease] Failed to parse runtime env: "
                                   f"{request.serialized_runtime_env}")
            return runtime_env_agent_pb2.DeleteRuntimeEnvIfPossibleReply(
                status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
                error_message="".join(
                    traceback.format_exception(type(e), e, e.__traceback__)),
            )

        self._reference_table.decrease_reference(
            runtime_env, request.serialized_runtime_env,
            request.source_process)

        return runtime_env_agent_pb2.DeleteRuntimeEnvIfPossibleReply(
            status=agent_manager_pb2.AGENT_RPC_STATUS_OK)
Example 9
    def runtime_env(self):
        """Get the runtime env of the current job/worker.

        If this API is called in the driver or a Ray client, it returns the
        job-level runtime env.
        If this API is called in a worker or actor, it returns the
        worker-level runtime env.

        Returns:
            A new ray.runtime_env.RuntimeEnv instance.

        To merge with the current runtime env in specific cases, you can get
        the current runtime env with this API and modify it yourself.

        Example:

            >>> # Inherit current runtime env, except `env_vars`
            >>> Actor.options( # doctest: +SKIP
            ...     runtime_env=ray.get_runtime_context().runtime_env.update(
            ...     {"env_vars": {"A": "a", "B": "b"}})
            ... )

        """

        return RuntimeEnv.deserialize(self._get_runtime_env_string())
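Note: a more explicit, runnable version of the docstring example, assuming
RuntimeEnv supports dict-style item assignment (as its usage elsewhere in
these examples suggests); EchoActor is a hypothetical actor for illustration:

import ray

ray.init()

@ray.remote
class EchoActor:
    def env_vars(self):
        return ray.get_runtime_context().runtime_env.get("env_vars")

env = ray.get_runtime_context().runtime_env
env["env_vars"] = {"A": "a", "B": "b"}  # override only this field
actor = EchoActor.options(runtime_env=env).remote()
print(ray.get(actor.env_vars.remote()))  # expected: {'A': 'a', 'B': 'b'}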
Example 10
        async def _setup_runtime_env(
            serialized_runtime_env, serialized_allocated_resource_instances
        ):
            runtime_env = RuntimeEnv.deserialize(serialized_runtime_env)
            allocated_resource: dict = json.loads(
                serialized_allocated_resource_instances or "{}"
            )

            # Use a separate logger for each job.
            per_job_logger = self.get_or_create_logger(request.job_id)
            # TODO(chenk008): Add log about allocated_resource to
            # avoid lint error. That will be moved to cgroup plugin.
            per_job_logger.debug(f"Worker has resource :" f"{allocated_resource}")
            context = RuntimeEnvContext(env_vars=runtime_env.env_vars())
            await self._container_manager.setup(
                runtime_env, context, logger=per_job_logger
            )

            for (manager, uri_cache) in [
                (self._working_dir_manager, self._working_dir_uri_cache),
                (self._conda_manager, self._conda_uri_cache),
                (self._pip_manager, self._pip_uri_cache),
            ]:
                uri = manager.get_uri(runtime_env)
                if uri is not None:
                    if uri not in uri_cache:
                        per_job_logger.debug(f"Cache miss for URI {uri}.")
                        size_bytes = await manager.create(
                            uri, runtime_env, context, logger=per_job_logger
                        )
                        uri_cache.add(uri, size_bytes, logger=per_job_logger)
                    else:
                        per_job_logger.debug(f"Cache hit for URI {uri}.")
                        uri_cache.mark_used(uri, logger=per_job_logger)
                manager.modify_context(uri, runtime_env, context)

            # Set up py_modules. For now, py_modules uses multiple URIs so
            # the logic is slightly different from working_dir, conda, and
            # pip above.
            py_modules_uris = self._py_modules_manager.get_uris(runtime_env)
            if py_modules_uris is not None:
                for uri in py_modules_uris:
                    if uri not in self._py_modules_uri_cache:
                        per_job_logger.debug(f"Cache miss for URI {uri}.")
                        size_bytes = await self._py_modules_manager.create(
                            uri, runtime_env, context, logger=per_job_logger
                        )
                        self._py_modules_uri_cache.add(
                            uri, size_bytes, logger=per_job_logger
                        )
                    else:
                        per_job_logger.debug(f"Cache hit for URI {uri}.")
                        self._py_modules_uri_cache.mark_used(uri, logger=per_job_logger)
            self._py_modules_manager.modify_context(
                py_modules_uris, runtime_env, context
            )

            # Add the mapping of URIs -> the serialized environment to be
            # used for cache invalidation.
            if runtime_env.working_dir_uri():
                uri = runtime_env.working_dir_uri()
                self._uris_to_envs[uri].add(serialized_runtime_env)
            if runtime_env.py_modules_uris():
                for uri in runtime_env.py_modules_uris():
                    self._uris_to_envs[uri].add(serialized_runtime_env)
            if runtime_env.conda_uri():
                uri = runtime_env.conda_uri()
                self._uris_to_envs[uri].add(serialized_runtime_env)
            if runtime_env.pip_uri():
                uri = runtime_env.pip_uri()
                self._uris_to_envs[uri].add(serialized_runtime_env)
            if runtime_env.plugin_uris():
                for uri in runtime_env.plugin_uris():
                    self._uris_to_envs[uri].add(serialized_runtime_env)

            def setup_plugins():
                # Run setup function from all the plugins
                for plugin_class_path, config in runtime_env.plugins():
                    per_job_logger.debug(
                        f"Setting up runtime env plugin {plugin_class_path}"
                    )
                    plugin_class = import_attr(plugin_class_path)
                    # TODO(simon): implement uri support
                    plugin_class.create(
                        "uri not implemented", json.loads(config), context
                    )
                    plugin_class.modify_context(
                        "uri not implemented", json.loads(config), context
                    )

            loop = asyncio.get_event_loop()
            # The plugin setup methods are synchronous; run them in another
            # thread to avoid blocking the asyncio loop.
            await loop.run_in_executor(None, setup_plugins)

            return context
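Note: the _uris_to_envs bookkeeping at the end is a reverse index from each
URI to the serialized envs that reference it, which enables cache
invalidation. A standalone sketch of the pattern (the real attribute is
presumably a defaultdict initialized elsewhere):

from collections import defaultdict

uris_to_envs = defaultdict(set)  # URI -> serialized envs that use it

def register(uri: str, serialized_env: str) -> None:
    uris_to_envs[uri].add(serialized_env)

register("gcs://pkg_a.zip", '{"working_dir": "gcs://pkg_a.zip"}')
register("gcs://pkg_a.zip", '{"py_modules": ["gcs://pkg_a.zip"]}')
# Evicting "gcs://pkg_a.zip" now identifies both cached envs.
assert len(uris_to_envs["gcs://pkg_a.zip"]) == 2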
Example 11
    async def list_runtime_envs(self, *, option: ListApiOptions) -> ListApiResponse:
        """List all runtime env information from the cluster.

        Returns:
            A list of runtime env information in the cluster.
            The schema of each returned dict is equivalent to the
            `RuntimeEnvState` protobuf message.
            Unlike other APIs, there is no id -> data mapping here because
            runtime envs don't have unique IDs.
        """
        agent_ids = self._client.get_all_registered_agent_ids()
        replies = await asyncio.gather(
            *[
                self._client.get_runtime_envs_info(node_id, timeout=option.timeout)
                for node_id in agent_ids
            ],
            return_exceptions=True,
        )

        result = []
        unresponsive_nodes = 0
        for node_id, reply in zip(self._client.get_all_registered_agent_ids(), replies):
            if isinstance(reply, DataSourceUnavailable):
                unresponsive_nodes += 1
                continue
            elif isinstance(reply, Exception):
                raise reply

            states = reply.runtime_env_states
            for state in states:
                data = self._message_to_dict(message=state, fields_to_decode=[])
                # Need to deserialize this field.
                data["runtime_env"] = RuntimeEnv.deserialize(
                    data["runtime_env"]
                ).to_dict()
                data["node_id"] = node_id
                result.append(data)

        partial_failure_warning = None
        if len(agent_ids) > 0 and unresponsive_nodes > 0:
            warning_msg = NODE_QUERY_FAILURE_WARNING.format(
                type="agent",
                total=len(agent_ids),
                network_failures=unresponsive_nodes,
                log_command="dashboard_agent.log",
            )
            if unresponsive_nodes == len(agent_ids):
                raise DataSourceUnavailable(warning_msg)
            partial_failure_warning = (
                f"The returned data may contain incomplete result. {warning_msg}"
            )

        result = self._filter(result, option.filters, RuntimeEnvState)

        # Sort to make the output deterministic.
        def sort_func(entry):
            # If the creation time is not present yet (the runtime env failed
            # to be created or has not been created yet), the entry gets the
            # highest priority. Otherwise, larger creation times come first.
            if "creation_time_ms" not in entry:
                return float("inf")
            elif entry["creation_time_ms"] is None:
                return float("inf")
            else:
                return float(entry["creation_time_ms"])

        result.sort(key=sort_func, reverse=True)
        return ListApiResponse(
            result=list(islice(result, option.limit)),
            partial_failure_warning=partial_failure_warning,
        )
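Note: a standalone sketch of the partial-failure pattern above: gather
per-node calls with return_exceptions=True, skip and count unavailable
sources, re-raise anything unexpected, and fail outright only when every node
is unreachable (names here are illustrative, not the dashboard's API):

import asyncio

class DataSourceUnavailable(Exception):
    pass

async def query_node(node_id: str) -> str:
    if node_id == "node-2":
        raise DataSourceUnavailable(node_id)
    return f"reply-from-{node_id}"

async def main() -> None:
    node_ids = ["node-1", "node-2", "node-3"]
    replies = await asyncio.gather(
        *(query_node(n) for n in node_ids), return_exceptions=True)
    unresponsive, results = 0, []
    for node_id, reply in zip(node_ids, replies):
        if isinstance(reply, DataSourceUnavailable):
            unresponsive += 1
            continue
        elif isinstance(reply, Exception):
            raise reply  # unexpected errors still propagate
        results.append(reply)
    if unresponsive == len(node_ids):
        raise DataSourceUnavailable("all nodes unreachable")
    print(results, f"({unresponsive} unresponsive)")

asyncio.run(main())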
Example 12
    async def GetOrCreateRuntimeEnv(self, request, context):
        self._logger.debug(
            f"Got request from {request.source_process} to increase "
            "reference for runtime env: "
            f"{request.serialized_runtime_env}.")

        async def _setup_runtime_env(runtime_env, serialized_runtime_env,
                                     serialized_allocated_resource_instances):
            allocated_resource: dict = json.loads(
                serialized_allocated_resource_instances or "{}")
            # Use a separate logger for each job.
            per_job_logger = self.get_or_create_logger(request.job_id)
            # TODO(chenk008): Add log about allocated_resource to
            # avoid lint error. That will be moved to cgroup plugin.
            per_job_logger.debug(f"Worker has resource :"
                                 f"{allocated_resource}")
            context = RuntimeEnvContext(env_vars=runtime_env.env_vars())
            await self._container_manager.setup(runtime_env,
                                                context,
                                                logger=per_job_logger)

            for manager in self._base_plugin_cache_managers.values():
                await manager.create_if_needed(runtime_env,
                                               context,
                                               logger=per_job_logger)

            def setup_plugins():
                # Run setup function from all the plugins
                for name, config in runtime_env.plugins():
                    per_job_logger.debug(
                        f"Setting up runtime env plugin {name}")
                    plugin = self._runtime_env_plugin_manager.get_plugin(name)
                    if plugin is None:
                        raise RuntimeError(
                            f"runtime env plugin {name} not found.")
                    # TODO(architkulkarni): implement uri support
                    plugin.validate(runtime_env)
                    plugin.create("uri not implemented", json.loads(config),
                                  context)
                    plugin.modify_context(
                        "uri not implemented",
                        json.loads(config),
                        context,
                        per_job_logger,
                    )

            loop = asyncio.get_event_loop()
            # The plugin setup methods are synchronous; run them in another
            # thread to avoid blocking the asyncio loop.
            await loop.run_in_executor(None, setup_plugins)

            return context

        async def _create_runtime_env_with_retry(
            runtime_env,
            serialized_runtime_env,
            serialized_allocated_resource_instances,
            setup_timeout_seconds,
        ) -> Tuple[bool, str, str]:
            """
            Create runtime env with retry times. This function won't raise exceptions.

            Args:
                runtime_env(RuntimeEnv): The instance of RuntimeEnv class.
                serialized_runtime_env(str): The serialized runtime env.
                serialized_allocated_resource_instances(str): The serialized allocated
                resource instances.
                setup_timeout_seconds(int): The timeout of runtime environment creation.

            Returns:
                a tuple which contains result(bool), runtime env context(str), error
                message(str).

            """
            self._logger.info(
                f"Creating runtime env: {serialized_env} with timeout "
                f"{setup_timeout_seconds} seconds.")
            serialized_context = None
            error_message = None
            for _ in range(runtime_env_consts.RUNTIME_ENV_RETRY_TIMES):
                try:
                    # python 3.6 requires the type of input is `Future`,
                    # python 3.7+ only requires the type of input is `Awaitable`
                    # TODO(Catch-Bull): remove create_task when ray drop python 3.6
                    runtime_env_setup_task = create_task(
                        _setup_runtime_env(
                            runtime_env,
                            serialized_runtime_env,
                            serialized_allocated_resource_instances,
                        ))
                    runtime_env_context = await asyncio.wait_for(
                        runtime_env_setup_task, timeout=setup_timeout_seconds)
                    serialized_context = runtime_env_context.serialize()
                    error_message = None
                    break
                except Exception as e:
                    err_msg = f"Failed to create runtime env {serialized_env}."
                    self._logger.exception(err_msg)
                    error_message = "".join(
                        traceback.format_exception(type(e), e,
                                                   e.__traceback__))
                    await asyncio.sleep(
                        runtime_env_consts.RUNTIME_ENV_RETRY_INTERVAL_MS / 1000
                    )
            if error_message:
                self._logger.error(
                    "Runtime env creation failed for %d times, "
                    "don't retry any more.",
                    runtime_env_consts.RUNTIME_ENV_RETRY_TIMES,
                )
                return False, None, error_message
            else:
                self._logger.info(
                    "Successfully created runtime env: %s, the context: %s",
                    serialized_runtime_env,
                    serialized_context,
                )
                return True, serialized_context, None

        try:
            serialized_env = request.serialized_runtime_env
            runtime_env = RuntimeEnv.deserialize(serialized_env)
        except Exception as e:
            self._logger.exception("[Increase] Failed to parse runtime env: "
                                   f"{serialized_env}")
            return runtime_env_agent_pb2.GetOrCreateRuntimeEnvReply(
                status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
                error_message="".join(
                    traceback.format_exception(type(e), e, e.__traceback__)),
            )

        # Increase reference
        self._reference_table.increase_reference(runtime_env, serialized_env,
                                                 request.source_process)

        if serialized_env not in self._env_locks:
            # Async lock to prevent the same env from being installed
            # concurrently.
            self._env_locks[serialized_env] = asyncio.Lock()

        async with self._env_locks[serialized_env]:
            if serialized_env in self._env_cache:
                result = self._env_cache[serialized_env]
                if result.success:
                    context = result.result
                    self._logger.info("Runtime env already created "
                                      f"successfully. Env: {serialized_env}, "
                                      f"context: {context}")
                    return runtime_env_agent_pb2.GetOrCreateRuntimeEnvReply(
                        status=agent_manager_pb2.AGENT_RPC_STATUS_OK,
                        serialized_runtime_env_context=context,
                    )
                else:
                    error_message = result.result
                    self._logger.info("Runtime env already failed. "
                                      f"Env: {serialized_env}, "
                                      f"err: {error_message}")
                    # Recover the reference.
                    self._reference_table.decrease_reference(
                        runtime_env, serialized_env, request.source_process)
                    return runtime_env_agent_pb2.GetOrCreateRuntimeEnvReply(
                        status=agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
                        error_message=error_message,
                    )

            if SLEEP_FOR_TESTING_S:
                self._logger.info(f"Sleeping for {SLEEP_FOR_TESTING_S}s.")
                time.sleep(int(SLEEP_FOR_TESTING_S))

            runtime_env_config = RuntimeEnvConfig.from_proto(
                request.runtime_env_config)
            # According to the documentation of `asyncio.wait_for`,
            # None disables the timeout logic.
            setup_timeout_seconds = (
                None if runtime_env_config["setup_timeout_seconds"] == -1 else
                runtime_env_config["setup_timeout_seconds"])

            start = time.perf_counter()
            (
                successful,
                serialized_context,
                error_message,
            ) = await _create_runtime_env_with_retry(
                runtime_env,
                serialized_env,
                request.serialized_allocated_resource_instances,
                setup_timeout_seconds,
            )
            creation_time_ms = int(
                round((time.perf_counter() - start) * 1000, 0))
            if not successful:
                # Recover the reference.
                self._reference_table.decrease_reference(
                    runtime_env, serialized_env, request.source_process)
            # Add the result to env cache.
            self._env_cache[serialized_env] = CreatedEnvResult(
                successful,
                serialized_context if successful else error_message,
                creation_time_ms,
            )
            # Reply to the RPC.
            return runtime_env_agent_pb2.GetOrCreateRuntimeEnvReply(
                status=agent_manager_pb2.AGENT_RPC_STATUS_OK
                if successful else agent_manager_pb2.AGENT_RPC_STATUS_FAILED,
                serialized_runtime_env_context=serialized_context,
                error_message=error_message,
            )
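Note: the _env_locks/_env_cache dance above is a per-key locking pattern: one
asyncio.Lock per serialized env, so concurrent requests for the same env run
setup once and later arrivals hit the cache. A minimal standalone sketch
(illustrative names, not the agent's API):

import asyncio

env_locks: dict = {}  # serialized env -> asyncio.Lock
env_cache: dict = {}  # serialized env -> created context

async def get_or_create(serialized_env: str) -> str:
    if serialized_env not in env_locks:
        env_locks[serialized_env] = asyncio.Lock()
    async with env_locks[serialized_env]:
        if serialized_env in env_cache:
            return env_cache[serialized_env]  # cache hit, no second setup
        await asyncio.sleep(0.1)  # stand-in for the real setup work
        context = f"context-for-{serialized_env}"
        env_cache[serialized_env] = context
        return context

async def main() -> None:
    env = '{"pip": ["requests"]}'
    # Three concurrent requests; setup runs only once.
    print(await asyncio.gather(*(get_or_create(env) for _ in range(3))))

asyncio.run(main())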