def start(self): L.info("Starting %s Model Service.", self.name) pva_server = PVAServer(providers=[{ f"SIMULACRUM:SYS0:1:{self.name}:LIVE:TWISS": self.live_twiss_pv, f"SIMULACRUM:SYS0:1:{self.name}:DESIGN:TWISS": self.design_twiss_pv, f"SIMULACRUM:SYS0:1:{self.name}:LIVE:RMAT": self.live_rmat_pv, f"SIMULACRUM:SYS0:1:{self.name}:DESIGN:RMAT": self.design_rmat_pv, }]) try: zmq_task = self.loop.create_task(self.recv()) pva_refresh_task = self.loop.create_task(self.refresh_pva_table()) broadcast_task = self.loop.create_task( self.broadcast_model_changes()) jitter_task = self.loop.create_task(self.add_jitter()) self.loop.run_forever() except KeyboardInterrupt: L.info("Shutting down Model Service.") zmq_task.cancel() pva_refresh_task.cancel() broadcast_task.cancel() pva_server.stop() finally: self.loop.close() L.info("Model Service shutdown complete.")
def quickRPCServer(provider, prefix, target,
                   maxsize=20,
                   workers=1,
                   useenv=True, conf=None):
    """Run an RPC server in the current thread

    Calls are handled sequentially, and always in the current thread, if workers=1 (the default).
    If workers>1 then calls are handled concurrently by a pool of worker threads.
    Requires NTURI style argument encoding.

    :param str provider: A provider name.  Must be unique in this process.
    :param str prefix: PV name prefix.  Along with method names, must be globally unique.
    :param target: The object which is exporting methods.  (use the :func:`rpc` decorator)
    :param int maxsize: Number of pending RPC calls to be queued.
    :param int workers: Number of worker threads (default 1)
    :param useenv: Passed to :class:`~p4p.server.Server`
    :param conf: Passed to :class:`~p4p.server.Server`
    """
    from p4p.server import Server, installProvider, removeProvider
    queue = WorkQueue(maxsize=maxsize)
    installProvider(provider, NTURIDispatcher(queue, target=target, prefix=prefix))
    try:
        threads = []
        server = Server(providers=provider, useenv=useenv, conf=conf)
        try:
            for n in range(1, workers):
                T = Thread(name='%s Worker %d' % (provider, n), target=queue.handle)
                threads.append(T)
                T.start()
            # handle calls in the current thread until KeyboardInterrupt
            queue.handle()
        finally:
            try:
                for T in threads:
                    queue.interrupt()
                    T.join()
            finally:
                # we really need to do this or the process will hang on exit
                server.stop()
    finally:
        removeProvider(provider)
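A minimal usage sketch for quickRPCServer, following the pattern its docstring describes: methods on the target object are exported with the rpc decorator and served under the PV name prefix. The Summer class and the "pv:call:" prefix are placeholders.

from p4p.nt import NTScalar
from p4p.rpc import rpc, quickRPCServer

class Summer(object):
    @rpc(NTScalar("d"))
    def add(self, lhs, rhs):
        # Arguments come from the NTURI query; the return value is wrapped
        # as an NTScalar double.
        return float(lhs) + float(rhs)

# Serves the RPC PV "pv:call:add" until interrupted.
quickRPCServer(provider="Example",
               prefix="pv:call:",
               target=Summer())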
def start(self): L.info("Starting Model Service.") pva_server = PVAServer(providers=[{ "BMAD:SYS0:1:FULL_MACHINE:LIVE:TWISS": self.live_twiss_pv, "BMAD:SYS0:1:FULL_MACHINE:DESIGN:TWISS": self.design_twiss_pv }]) zmq_task = self.loop.create_task(self.recv()) pva_refresh_task = self.loop.create_task(self.refresh_pva_table()) broadcast_task = self.loop.create_task(self.broadcast_model_changes()) try: self.loop.run_until_complete(zmq_task) except KeyboardInterrupt: zmq_task.cancel() pva_refresh_task.cancel() broadcast_task.cancel() pva_server.stop()
class PvaServerComms(builtin.controllers.ServerComms):
    """A class for communication between pva client and server"""

    def __init__(self, mri: builtin.controllers.AMri) -> None:
        super().__init__(mri)
        self._pva_server = None
        self._provider = None
        self._published: Set[str] = set()
        self._pvs: Dict[str, Dict[Optional[str], SharedPV]] = {}
        # Hooks
        self.register_hooked(ProcessPublishHook, self.publish)

    # Need camelCase as called by p4p Server
    # noinspection PyPep8Naming
    def testChannel(self, channel_name: str) -> bool:
        if channel_name in self._published:
            # Someone is asking for a Block
            return True
        elif "." in channel_name:
            # Someone is asking for the field of a Block
            mri, field = channel_name.rsplit(".", 1)
            return mri in self._published
        else:
            # We don't have it
            return False

    # Need camelCase as called by p4p Server
    # noinspection PyPep8Naming
    def makeChannel(self, channel_name: str, src: str) -> SharedPV:
        # Need to spawn as we take a lock here and in process
        return cothread.CallbackResult(
            self._make_channel, channel_name, src, callback_timeout=1.0
        )

    def _make_channel(self, channel_name: str, src: str) -> SharedPV:
        self.log.debug(f"Making PV {channel_name} for {src}")
        if channel_name in self._published:
            # Someone is asking for a Block
            mri = channel_name
            field = None
        elif "." in channel_name:
            # Someone is asking for the field of a Block
            mri, field = channel_name.rsplit(".", 1)
        else:
            raise NameError("Bad channel %s" % channel_name)
        with self._lock:
            pvs = self._pvs.setdefault(mri, {})
            try:
                pv = pvs[field]
            except KeyError:
                assert self.process, "No attached process"
                controller = self.process.get_controller(mri)
                handler = BlockHandler(controller, field)
                # We want any client passing a pvRequest field() to ONLY receive
                # that field. The default behaviour of p4p is to send a masked
                # version of the full structure. The mapperMode option allows us
                # to tell p4p to send a slice instead
                # https://github.com/mdavidsaver/pvDataCPP/blob/master/src/copy/pv/createRequest.h#L76
                pv = SharedPV(handler=handler, options={"mapperMode": "Slice"})
                pvs[field] = pv
        return pv

    def do_init(self):
        super().do_init()
        if self._pva_server is None:
            self.log.info("Starting PVA server")
            self._provider = DynamicProvider("PvaServerComms", self)
            self._pva_server = Server(providers=[self._provider])
            self.log.info("Started PVA server")

    def do_disable(self):
        super().do_disable()
        if self._pva_server is not None:
            self.log.info("Stopping PVA server")
            # Stop the server
            self._pva_server.stop()
            # Disconnect everyone
            self.disconnect_pv_clients(list(self._pvs))
            # Get rid of the server reference so we can't stop again
            self._pva_server = None
            self.log.info("Stopped PVA server")

    @add_call_types
    def publish(self, published: APublished) -> None:
        self._published = set(published)
        if self._pva_server:
            with self._lock:
                mris = [mri for mri in self._pvs if mri not in published]
                # Delete blocks we no longer have
                self.disconnect_pv_clients(mris)

    def disconnect_pv_clients(self, mris: List[str]) -> None:
        """Disconnect anyone listening to any of the given mris"""
        for mri in mris:
            for pv in self._pvs.pop(mri, {}).values():
                # Close pv with force destroy on, this will call
                # onLastDisconnect
                pv.close(destroy=True, sync=True, timeout=1.0)
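A client-side sketch of the behaviour the "mapperMode": "Slice" option is aiming for: subscribing to a single field of a Block so that only that sub-structure is delivered, rather than a masked copy of the whole Block structure. The "SOME:BLOCK:MRI" name and "health" field are placeholders, not real malcolm blocks.

from p4p.client.thread import Context

ctxt = Context("pva")

def on_update(value):
    # With the Slice mapper the server sends just the requested field.
    print("health changed:", value)

# "<mri>.<field>" matches the channel naming handled by makeChannel() above.
sub = ctxt.monitor("SOME:BLOCK:MRI.health", on_update)
# ... later, when done ...
sub.close()
ctxt.close()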