Example #1
def _blocking_send(
        self,
        req: ray_client_pb2.DataRequest) -> ray_client_pb2.DataResponse:
    if self._in_shutdown:
        from ray.util import disconnect
        disconnect()
        raise ConnectionError(
            "Request can't be sent because the data channel is "
            "terminated. This is likely because the data channel "
            "disconnected at some point before this request was "
            "prepared. Ray Client has been disconnected.")
    req_id = self._next_id()
    req.req_id = req_id
    self.request_queue.put(req)
    data = None
    with self.cv:
        self.cv.wait_for(
            lambda: req_id in self.ready_data or self._in_shutdown)
        if self._in_shutdown:
            from ray.util import disconnect
            disconnect()
            raise ConnectionError(
                "Sending request failed because the data channel "
                "terminated. This is usually due to an error "
                f"in handling the most recent request: {req}. Ray Client "
                "has been disconnected.")
        data = self.ready_data[req_id]
        del self.ready_data[req_id]
    return data
Example #2
def _async_send(self,
                req: ray_client_pb2.DataRequest,
                callback: Optional[ResponseCallable] = None) -> None:
    req_id = self._next_id()
    req.req_id = req_id
    if callback:
        self.asyncio_waiting_data[req_id] = callback
    self.request_queue.put(req)
Example #3
def _async_send(self,
                req: ray_client_pb2.DataRequest,
                callback: Optional[ResponseCallable] = None) -> None:
    with self.lock:
        self._check_shutdown()
        req_id = self._next_id()
        req.req_id = req_id
        self.asyncio_waiting_data[req_id] = callback
        self.outstanding_requests[req_id] = req
        self.request_queue.put(req)
Example #4
def _blocking_send(self, req: ray_client_pb2.DataRequest
                   ) -> ray_client_pb2.DataResponse:
    req_id = self._next_id()
    req.req_id = req_id
    self.request_queue.put(req)
    data = None
    with self.cv:
        self.cv.wait_for(lambda: req_id in self.ready_data)
        data = self.ready_data[req_id]
        del self.ready_data[req_id]
    return data
Example #5
def _should_cache(req: ray_client_pb2.DataRequest) -> bool:
    """
    Returns True if the response should to the given request should be cached,
    false otherwise. At the moment the only requests we do not cache are:
        - asynchronous gets: These arrive out of order. Skipping caching here
            is fine, since repeating an async get is idempotent
        - acks: Repeating acks is idempotent
        - clean up requests: Also idempotent, and client has likely already
             wrapped up the data connection by this point.
    """
    req_type = req.WhichOneof("type")
    if req_type == "get" and req.get.asynchronous:
        return False
    return req_type not in ("acknowledge", "connection_cleanup")
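As a rough illustration, the sketch below feeds a few hand-built DataRequest messages through the predicate. It assumes the generated module is importable as ray.core.generated.ray_client_pb2 (the path may differ across Ray versions) and that _should_cache from the snippet above is in scope; protobuf's SetInParent() is used only to select a branch of the "type" oneof.

from ray.core.generated import ray_client_pb2

# A plain synchronous get: "get" is selected and get.asynchronous is False,
# so its response would be cached.
sync_get = ray_client_pb2.DataRequest()
sync_get.get.SetInParent()
assert _should_cache(sync_get)

# An asynchronous get arrives out of order, so it is not cached.
async_get = ray_client_pb2.DataRequest()
async_get.get.asynchronous = True
assert not _should_cache(async_get)

# Acks are idempotent and are not cached either.
ack = ray_client_pb2.DataRequest()
ack.acknowledge.SetInParent()
assert not _should_cache(ack)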
Example #6
def _blocking_send(
        self,
        req: ray_client_pb2.DataRequest) -> ray_client_pb2.DataResponse:
    req_id = self._next_id()
    req.req_id = req_id
    self.request_queue.put(req)
    data = None
    with self.cv:
        self.cv.wait_for(
            lambda: req_id in self.ready_data or self._in_shutdown)
        if self._in_shutdown:
            raise ConnectionError(
                f"cannot send request {req}: data channel shutting down")
        data = self.ready_data[req_id]
        del self.ready_data[req_id]
    return data
Example #7
def _async_send(self,
                req: ray_client_pb2.DataRequest,
                callback: Optional[ResponseCallable] = None) -> None:
    if self._in_shutdown:
        from ray.util import disconnect
        disconnect()
        raise ConnectionError(
            "Request can't be sent because the data channel is "
            "terminated. This is likely because the data channel "
            "disconnected at some point before this request was "
            "prepared. Ray Client has been disconnected.")
    req_id = self._next_id()
    req.req_id = req_id
    if callback:
        self.asyncio_waiting_data[req_id] = callback
    self.request_queue.put(req)
Example #8
def _blocking_send(self, req: ray_client_pb2.DataRequest
                   ) -> ray_client_pb2.DataResponse:
    req_id = self._next_id()
    req.req_id = req_id
    self.request_queue.put(req)
    data = None
    with self.cv:
        self.cv.wait_for(
            lambda: req_id in self.ready_data or self._in_shutdown)
        if self._in_shutdown:
            raise ConnectionError(
                "Sending request failed because the data channel "
                "terminated. This is usually due to an error "
                f"in handling the most recent request: {req}")
        data = self.ready_data[req_id]
        del self.ready_data[req_id]
    return data
Example #9
def _blocking_send(
    self, req: ray_client_pb2.DataRequest
) -> ray_client_pb2.DataResponse:
    with self.lock:
        self._check_shutdown()
        req_id = self._next_id()
        req.req_id = req_id
        self.request_queue.put(req)
        # Track the request so it can be replayed if the data channel
        # has to reconnect before the response arrives.
        self.outstanding_requests[req_id] = req

        self.cv.wait_for(lambda: req_id in self.ready_data or self._in_shutdown)
        self._check_shutdown()

        data = self.ready_data[req_id]
        del self.ready_data[req_id]
        del self.outstanding_requests[req_id]
        # Tell the server the response was received so it can drop its
        # cached copy.
        self._acknowledge(req_id)

    return data
Example #10
def prepare_runtime_init_req(
    init_request: ray_client_pb2.DataRequest
) -> Tuple[ray_client_pb2.DataRequest, JobConfig]:
    """
    Extract JobConfig and possibly mutate InitRequest before it is passed to
    the specific RayClient Server.
    """
    init_type = init_request.WhichOneof("type")
    assert init_type == "init", ("Received initial message of type "
                                 f"{init_type}, not 'init'.")
    req = init_request.init
    job_config = JobConfig()
    if req.job_config:
        job_config = pickle.loads(req.job_config)
    new_job_config = ray_client_server_env_prep(job_config)
    modified_init_req = ray_client_pb2.InitRequest(
        job_config=pickle.dumps(new_job_config))

    init_request.init.CopyFrom(modified_init_req)
    return (init_request, new_job_config)
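A minimal sketch of how the first message on the data channel might be passed through this helper. It assumes the generated module is importable as ray.core.generated.ray_client_pb2 and that JobConfig comes from ray.job_config; plain pickle is used here to mirror the snippet above, though the real server may serialize with cloudpickle.

import pickle

from ray.core.generated import ray_client_pb2
from ray.job_config import JobConfig

# First message on the data channel: an InitRequest carrying the pickled
# JobConfig chosen by the client.
init_request = ray_client_pb2.DataRequest(
    init=ray_client_pb2.InitRequest(job_config=pickle.dumps(JobConfig())))

# The helper unpickles the JobConfig, lets the server adjust it, and
# re-serializes it into the request that is forwarded downstream.
modified_request, job_config = prepare_runtime_init_req(init_request)
assert modified_request.WhichOneof("type") == "init"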
Example #11
def _should_cache(req: ray_client_pb2.DataRequest) -> bool:
    """
    Returns True if the response should to the given request should be cached,
    false otherwise. At the moment the only requests we do not cache are:
        - asynchronous gets: These arrive out of order. Skipping caching here
            is fine, since repeating an async get is idempotent
        - acks: Repeating acks is idempotent
        - clean up requests: Also idempotent, and client has likely already
             wrapped up the data connection by this point.
        - puts: We should only cache when we receive the final chunk, since
             any earlier chunks won't generate a response
        - tasks: We should only cache when we receive the final chunk,
             since any earlier chunks won't generate a response
    """
    req_type = req.WhichOneof("type")
    if req_type == "get" and req.get.asynchronous:
        return False
    if req_type == "put":
        return req.put.chunk_id == req.put.total_chunks - 1
    if req_type == "task":
        return req.task.chunk_id == req.task.total_chunks - 1
    return req_type not in ("acknowledge", "connection_cleanup")
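The chunking rule can be sketched the same way; this assumes the put branch of DataRequest carries the chunk_id and total_chunks fields referenced above (they may be absent in older Ray versions) and that _should_cache from this snippet is in scope.

from ray.core.generated import ray_client_pb2

# A put split into three chunks: only the final chunk produces a response,
# so only that chunk's response is worth caching.
for chunk_id in range(3):
    chunk = ray_client_pb2.DataRequest()
    chunk.put.chunk_id = chunk_id
    chunk.put.total_chunks = 3
    assert _should_cache(chunk) == (chunk_id == 2)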
Example #12
def _async_send(self, req: ray_client_pb2.DataRequest) -> None:
    req_id = self._next_id()
    req.req_id = req_id
    self.request_queue.put(req)