def req_server_meta(dealer: zmq.Socket) -> ServerMeta:
    """
    Request the server's metadata over *dealer* and return it.

    :param dealer:
        A connected DEALER socket pointing at the zproc server.
    :return:
        The deserialized :py:class:`ServerMeta` reply.
    :raises RuntimeError:
        If the server reports a ZProc version different from this client's.
    """
    dealer.send(_server_meta_req_cache)
    meta = serializer.loads(dealer.recv())

    # Guard clause: only a version-matched server is usable.
    if meta.version == __version__:
        return meta

    raise RuntimeError(
        "The server version didn't match. "
        "Please make sure the server (%r) is using the same version of ZProc as this client (%r)."
        % (meta.version, __version__)
    )
def ping(
    server_address: str,
    *,
    timeout: float = None,
    payload: Union[bytes, None] = None,
) -> int:
    """
    Ping the zproc server.

    This can be used to easily detect if a server is alive and running,
    with the aid of a suitable ``timeout``.

    :param server_address:
        .. include:: /api/snippets/server_address.rst
    :param timeout:
        The timeout in seconds.

        If this is set to ``None``, then it will block forever,
        until the zproc server replies.

        For all other values, it will wait for a reply,
        for that amount of time before returning with a :py:class:`TimeoutError`.

        By default it is set to ``None``.
    :param payload:
        payload that will be sent to the server.

        If it is set to None, then ``os.urandom(56)`` (56 random bytes) will be used.

        (No real reason for the ``56`` magic number.)
    :return:
        The zproc server's **pid**.
    """
    if payload is None:
        payload = os.urandom(56)

    with util.create_zmq_ctx() as zmq_ctx:
        with zmq_ctx.socket(zmq.DEALER) as dealer_sock:
            dealer_sock.connect(server_address)
            if timeout is not None:
                # RCVTIMEO is specified in milliseconds.
                dealer_sock.setsockopt(zmq.RCVTIMEO, int(timeout * 1000))

            dealer_sock.send(
                serializer.dumps({Msgs.cmd: Cmds.ping, Msgs.info: payload})
            )

            try:
                recv_payload, pid = serializer.loads(dealer_sock.recv())
            except zmq.error.Again:
                # Fix: original message was garbled ("waiting while for").
                raise TimeoutError(
                    "Timed-out while waiting for the ZProc server to respond."
                )

            # Explicit raise instead of `assert` so this validation is not
            # stripped under `python -O`; the exception type is unchanged.
            if recv_payload != payload:
                raise AssertionError(
                    "Payload doesn't match! The server connection may be compromised, or unstable."
                )

            return pid
def wait(self, timeout: Union[int, float] = None):
    """
    Wait until this process finishes execution,
    then return the value returned by the ``target``.

    This method raises a :py:exc:`.ProcessWaitError`,
    if the child Process exits with a non-zero exitcode,
    or if something goes wrong while communicating with the child.

    :param timeout:
        The timeout in seconds.

        If the value is ``None``, it will block until the zproc server replies.

        For all other values, it will wait for a reply,
        for that amount of time before returning with a :py:class:`TimeoutError`.

    :return:
        The value returned by the ``target`` function.
    """
    # try to fetch the cached result.
    if self._has_returned:
        return self._result

    if timeout is not None:
        target = time.time() + timeout
        while time.time() < target:
            # Bug fix: join only for the *remaining* time, not the full
            # `timeout` on every pass — the old code could block for up
            # to ~2x the requested timeout.
            self.child.join(max(0, target - time.time()))
        if self.is_alive:
            raise TimeoutError(
                f"Timed-out while waiting for Process to return. -- {self!r}"
            )
    else:
        self.child.join()

    # Defensive: only reachable if the child is somehow still alive
    # despite the join above having returned.
    if self.is_alive:
        return None

    exitcode = self.exitcode
    if exitcode != 0:
        raise exceptions.ProcessWaitError(
            f"Process finished with a non-zero exitcode ({exitcode}). -- {self!r}",
            exitcode,
            self,
        )

    try:
        self._result = serializer.loads(self._result_sock.recv())
    except zmq.error.Again:
        raise exceptions.ProcessWaitError(
            "The Process died before sending its return value. "
            "It probably crashed, got killed, or exited without warning.",
            exitcode,
        )

    self._has_returned = True
    self._cleanup()
    return self._result
def recv_request(self):
    """
    Receive a single request from the state router socket,
    switch to the requested namespace (if one was provided),
    and dispatch the request to the matching command handler.
    """
    self.identity, raw_request = self.state_router.recv_multipart()
    request = serializer.loads(raw_request)

    # A namespace key is optional; when present, select that namespace's state.
    if Msgs.namespace in request:
        self.namespace = request[Msgs.namespace]
        self.state = self.state_map[self.namespace]

    self.dispatch_dict[request[Msgs.cmd]](request)
def count(self, value: int):
    """
    Resize the worker pool to hold exactly *value* workers,
    spawning new worker processes or asking surplus ones to shut down.
    """
    delta = value - self.count

    if delta > 0:
        # Spawn the extra workers, waiting for each to report readiness.
        for _ in range(delta):
            recv_conn, send_conn = multiprocessing.Pipe()

            proc = multiprocessing.Process(
                target=worker_process, args=[self.server_address, send_conn]
            )
            proc.start()

            with recv_conn:
                reply = recv_conn.recv_bytes()
                if reply:
                    # Non-empty reply carries a serialized error from the
                    # worker; loading it surfaces the failure here.
                    serializer.loads(reply)

            self.worker_list.append(proc)
    elif delta < 0:
        # Notify remaining workers to finish up, and close shop.
        for _ in range(-delta):
            self._task_push.send_multipart(EMPTY_MULTIPART)
def _request_reply(self) -> StateUpdate:
    """
    Ask the watcher dealer for the next state update and return it
    as a :py:class:`StateUpdate`.
    """
    state = self.state
    request = [
        state._identity,
        state._namespace_bytes,
        bytes(self.identical_okay),
        struct.pack("d", self._only_after),  # timestamp as a C double
    ]

    response = util.strict_request_reply(
        request,
        state._w_dealer.send_multipart,
        state._w_dealer.recv_multipart,
    )

    update_args = serializer.loads(response[0])
    return StateUpdate(*update_args, is_identical=bool(response[1]))
def start_server(
    server_address: str = None, *, backend: Callable = multiprocessing.Process
) -> Tuple[multiprocessing.Process, str]:
    """
    Start a new zproc server.

    :param server_address:
        .. include:: /api/snippets/server_address.rst
    :param backend:
        .. include:: /api/snippets/backend.rst
    :return:
        A ``tuple``, containing a :py:class:`multiprocessing.Process`
        object for server and the server address.
    """
    recv_conn, send_conn = multiprocessing.Pipe()

    # Launch the server and wait for it to report its metadata over the pipe.
    server_process = backend(target=main, args=[server_address, send_conn])
    server_process.start()

    try:
        with recv_conn:
            server_meta: ServerMeta = serializer.loads(recv_conn.recv_bytes())
    except zmq.ZMQError as e:
        # NOTE(review): a ZMQError raised while binding inside the server is
        # presumably forwarded through the pipe and re-raised by
        # serializer.loads() — confirm against the server's error path.
        if e.errno == 98:
            # errno 98: EADDRINUSE
            raise ConnectionError(
                "Encountered - %s. Perhaps the server is already running?" % repr(e)
            )
        if e.errno == 22:
            # errno 22: EINVAL (malformed endpoint string)
            raise ValueError(
                "Encountered - %s. `server_address` must be a string containing a valid endpoint."
                % repr(e)
            )
        raise

    return server_process, server_meta.state_router
def worker_process(server_address: str, send_conn):
    """
    Entry point of a pool worker process.

    Connects to the server's task proxy and result sockets, then loops:
    pull a task, run it, and push the result back — until an empty
    multipart message signals shutdown. Startup success/failure is
    reported once over *send_conn* (empty bytes on success, a serialized
    RemoteException on failure).
    """
    with util.socket_factory(zmq.PULL, zmq.PUSH) as (zmq_ctx, task_pull, result_push):
        server_meta = util.get_server_meta(zmq_ctx, server_address)

        try:
            task_pull.connect(server_meta.task_proxy_out)
            result_push.connect(server_meta.task_result_pull)
            state = State(server_address)
        except Exception:
            # Report the setup failure to the parent, then bail out.
            with send_conn:
                send_conn.send_bytes(serializer.dumps(RemoteException()))
        else:
            # Empty bytes == "ready"; the parent waits for this handshake.
            with send_conn:
                send_conn.send_bytes(b"")
            try:
                while True:
                    msg = task_pull.recv_multipart()
                    # An empty multipart message is the shutdown signal.
                    if msg == EMPTY_MULTIPART:
                        return
                    chunk_id, target_bytes, task_bytes = msg
                    try:
                        task = serializer.loads(task_bytes)
                        target = serializer.loads_fn(target_bytes)
                        result = run_task(target, task, state)
                    except KeyboardInterrupt:
                        # Never swallow Ctrl-C; let it terminate the worker.
                        raise
                    except Exception:
                        # Ship task failures back to the caller instead of dying.
                        result = RemoteException()
                    result_push.send_multipart(
                        [chunk_id, serializer.dumps(result)])
            except Exception:
                # Last-resort logging for crashes outside the per-task handler.
                util.log_internal_crash("Worker process")
def _s_request_reply(self, request: Dict[int, Any]):
    """
    Tag *request* with this state's namespace, send it over the server
    dealer socket, and return the deserialized reply.
    """
    request[Msgs.namespace] = self._namespace_bytes
    payload = serializer.dumps(request)
    reply = util.strict_request_reply(
        payload, self._s_dealer.send, self._s_dealer.recv
    )
    return serializer.loads(reply)
def _start_server(fn, _bind: Callable):
    """
    Run *fn* in a fresh process and return the deserialized payload it
    sends back over a one-shot pipe.
    """
    recv_conn, send_conn = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=fn, args=[send_conn, _bind])
    proc.start()

    with recv_conn:
        payload = recv_conn.recv_bytes()
    return serializer.loads(payload)
def _get_chunk(self, index: int):
    """
    Request the result chunk at *index* for this task from the server
    and return it deserialized.
    """
    chunk_id = util.encode_chunk_id(self.task_id, index)
    raw_chunk = util.strict_request_reply(
        chunk_id, self._dealer.send, self._dealer.recv
    )
    return serializer.loads(raw_chunk)