def do_init(self):
    """Run the base-class init, then lazily bring up the PVA server.

    The server (and its DynamicProvider) is created only on the first
    call; subsequent calls are no-ops for the PVA side.
    """
    super().do_init()
    if self._pva_server is not None:
        # server already running; nothing more to do
        return
    self.log.info("Starting PVA server")
    self._provider = DynamicProvider("PvaServerComms", self)
    self._pva_server = Server(providers=[self._provider])
    self.log.info("Started PVA server")
def quickRPCServer(provider, prefix, target, maxsize=20, workers=1, useenv=True, conf=None, isolate=False):
    """Run an RPC server in the current thread

    Calls are handled sequentially, and always in the current thread, if workers=1 (the default).
    If workers>1 then calls are handled concurrently by a pool of worker threads.
    Requires NTURI style argument encoding.

    :param str provider: A provider name.  Must be unique in this process.
    :param str prefix: PV name prefix.  Along with method names, must be globally unique.
    :param target: The object which is exporting methods.  (use the :func:`rpc` decorator)
    :param int maxsize: Number of pending RPC calls to be queued.
    :param int workers: Number of worker threads (default 1)
    :param useenv: Passed to :class:`~p4p.server.Server`
    :param conf: Passed to :class:`~p4p.server.Server`
    :param isolate: Passed to :class:`~p4p.server.Server`
    """
    from p4p.server import Server
    import time

    # ThreadedWorkQueue manages its own worker threads, so no manual
    # Thread bookkeeping is needed here.  (Removed a dead, never-used
    # `threads = []` local left over from an older implementation.)
    queue = ThreadedWorkQueue(maxsize=maxsize, workers=workers)
    provider = NTURIDispatcher(queue, target=target, prefix=prefix, name=provider)
    server = Server(providers=[provider], useenv=useenv, conf=conf, isolate=isolate)
    # Serve until the process is interrupted; the context managers stop
    # the server and drain the queue on exit.
    with server, queue:
        while True:
            time.sleep(10.0)
def server(self):
    """Serve `self.provider` and `self.rpc_provider` until Ctrl-C."""
    pva_server = Server(providers=[self.provider, self.rpc_provider])
    # The work happens in handlers driven by self.queue; this thread
    # just parks until the user interrupts.
    with pva_server, self.queue:
        try:
            while True:
                time.sleep(100)
        except KeyboardInterrupt:
            pass
def quickRPCServer(provider, prefix, target, maxsize=20, workers=1, useenv=True, conf=None):
    """Run an RPC server in the current thread

    Calls are handled sequentially, and always in the current thread, if workers=1 (the default).
    If workers>1 then calls are handled concurrently by a pool of worker threads.
    Requires NTURI style argument encoding.

    :param str provider: A provider name.  Must be unique in this process.
    :param str prefix: PV name prefix.  Along with method names, must be globally unique.
    :param target: The object which is exporting methods.  (use the :func:`rpc` decorator)
    :param int maxsize: Number of pending RPC calls to be queued.
    :param int workers: Number of worker threads (default 1)
    :param useenv: Passed to :class:`~p4p.server.Server`
    :param conf: Passed to :class:`~p4p.server.Server`
    """
    from p4p.server import Server, installProvider, removeProvider
    queue = WorkQueue(maxsize=maxsize)
    # Register the dispatcher globally under the given provider name;
    # undone in the outermost finally below.
    installProvider(provider, NTURIDispatcher(queue, target=target, prefix=prefix))
    try:
        threads = []
        server = Server(providers=provider, useenv=useenv, conf=conf)
        try:
            # Start workers-1 extra threads; the current thread acts as worker 0.
            for n in range(1, workers):
                T = Thread(name='%s Worker %d' % (provider, n), target=queue.handle)
                threads.append(T)
                T.start()
            # handle calls in the current thread until KeyboardInterrupt
            queue.handle()
        finally:
            try:
                # One interrupt() per worker wakes each queue.handle() loop
                # so the corresponding join() can complete.
                for T in threads:
                    queue.interrupt()
                    T.join()
            finally:
                # we really need to do this or the process will hang on exit
                server.stop()
    finally:
        # Unregister so the provider name can be reused in this process.
        removeProvider(provider)
def main(args):
    """Configure logging, open the DB, and serve MASAR/NTURI calls until Ctrl-C."""
    lvl = logging.getLevelName(args.log_level)
    if isinstance(lvl, str):
        # getLevelName() echoes a string back for unrecognized level names
        raise ValueError(
            "Bad level name, must be eg. ERROR, WARN, INFO, DEBUG")
    logging.basicConfig(level=lvl)

    work_queue = WorkQueue(maxsize=5)

    gather_name = 'minimasar.gather.' + args.gather
    _log.debug('Import gatherer "%s"', gather_name)
    gather_mod = import_module(gather_name)
    gather = gather_mod.Gatherer(queue=work_queue)

    _log.debug('Open DB "%s"', args.db)
    db = connect(args.db)

    _log.info("Install provider")
    service = Service(db, gather=gather.gather)

    _log.info("Prepare server")
    pva_server = Server(providers=[
        # provide MASAR style calls through a single PV (args.name)
        MASARDispatcher(work_queue, target=service, name='masar', channels=[args.name]),
        # provide NTRUI style calls, one PV per method, with a common prefix (args.name+':')
        NTURIDispatcher(work_queue, target=service, name='masaruri', prefix=args.name + ':'),
    ])

    with pva_server:
        _log.info("Run server")
        try:
            work_queue.handle()
        except KeyboardInterrupt:
            pass
        _log.info("Stop")
    _log.info("Done")
    db.close()
def run(self):
    """Create the group-control PVs and serve them forever."""
    self.provider = StaticProvider(__name__)
    self._pv = []
    self._msgHeader = 0

    def register(label, cmd):
        # Each PV is an NTScalar('I') counter whose puts invoke `cmd`.
        pv = SharedPV(initial=NTScalar('I').wrap(0), handler=PVHandler(cmd))
        name = self._name + ':' + label
        print('Registering {:}'.format(name))
        self.provider.add(name, pv)
        self._pv.append(pv)  # keep a reference so the PV stays alive

    # label -> handler table replaces a run of repeated addPV() calls
    for label, cmd in (
            ('GroupL0Reset', self.l0Reset),
            ('GroupL0Enable', self.l0Enable),
            ('GroupL0Disable', self.l0Disable),
            ('GroupMsgInsert', self.msgInsert),
            ('PART:0:Master', self.master),
            ('PART:0:MsgHeader', self.msgHeader)):
        register(label, cmd)

    with Server(providers=[self.provider]):
        while True:
            time.sleep(1)
def getargs():
    """Build the CLI parser: a V4L2 capture device path and an output PV name."""
    from argparse import ArgumentParser
    P = ArgumentParser()
    # fixed: help text was missing its closing parenthesis
    P.add_argument('video', help='A V4L2 device (eg. /dev/video0)')
    P.add_argument('pvname')
    return P

args = getargs().parse_args()

# Start with an empty 0x0 image until the first frame is captured.
pv = SharedPV(nt=NTNDArray(), initial=numpy.zeros((0, 0), dtype='u1'))
provider = StaticProvider('capture')
provider.add(args.pvname, pv)

# open the capture device, and run the Server
with open(args.video, 'r+b', 0) as F, Server(providers=[provider]):
    caps = v4l.query_capabilities(F.fileno())
    print('capabilities', caps)
    if 'VIDEO_CAPTURE' not in caps['capabilities']:
        print("Not a capture device")
        sys.exit(1)
    idx = -1
    for fmt in v4l.list_formats(F.fileno()):
        print('Supported:', fmt)
        if fmt['pixelformat'] in color._mangle:
            idx = fmt['index']
            # don't break, use last.
            # this assumes gray scale is listed first
def main():
    """Host status/control PVs for an XPM: build the rogue tree and serve updates."""
    global pvdb
    pvdb = {}  # start with empty dictionary
    global prefix
    prefix = ''

    parser = argparse.ArgumentParser(prog=sys.argv[0], description='host PVs for XPM')
    parser.add_argument('-P', required=True, help='e.g. DAQ:LAB2:XPM:1', metavar='PREFIX')
    parser.add_argument('-v', '--verbose', action='store_true', help='be verbose')
    parser.add_argument('--ip', type=str, required=True, help="IP address")
    parser.add_argument('--db', type=str, default=None,
                        help="save/restore db, for example [https://pswww.slac.stanford.edu/ws-auth/devconfigdb/ws/,configDB,LAB2,PROD]")
    args = parser.parse_args()
    if args.verbose:
        setVerbose(True)

    # Build and start the rogue device tree
    base = pr.Root(name='AMCc', description='')
    base.add(Top(name='XPM', ipAddr=args.ip))
    base.start(pollEn=False, initRead=False, zmqPort=None)

    xpm = base.XPM
    app = base.XPM.XpmApp

    # Print the AxiVersion Summary
    xpm.AxiVersion.printStatus()

    provider = StaticProvider(__name__)
    lock = Lock()

    pvstats = PVStats(provider, lock, args.P, xpm)
    pvctrls = PVCtrls(provider, lock, name=args.P, ip=args.ip, xpm=xpm,
                      stats=pvstats._groups, db=args.db)
    pvxtpg = PVXTpg(provider, lock, args.P, xpm, xpm.mmcmParms,
                    cuMode='xtpg' in xpm.AxiVersion.ImageName.get())

    # process PVA transactions at a fixed cadence
    updatePeriod = 1.0
    with Server(providers=[provider]):
        try:
            if pvxtpg is not None:
                pvxtpg.init()
            pvstats.init()
            while True:
                t0 = time.perf_counter()
                if pvxtpg is not None:
                    pvxtpg.update()
                pvstats.update()
                pvctrls.update()
                t1 = time.perf_counter()
                # sleep off whatever remains of the update period
                remain = t0 + updatePeriod - t1
                if remain > 0:
                    time.sleep(remain)
        except KeyboardInterrupt:
            pass
def main():
    """Host status PVs for a KCU card, polling at a configurable interval."""
    global pvdb
    pvdb = {}  # start with empty dictionary
    global prefix
    prefix = ''
    global provider

    parser = argparse.ArgumentParser(prog=sys.argv[0], description='host PVs for KCU')
    parser.add_argument('-i', '--interval', type=int, help='PV update interval', default=10)
    parser.add_argument('-H', '--hsd', action='store_true', help='HSD node', default=False)
    args = parser.parse_args()

    # Build and start the rogue device tree over AXI
    base = pr.Root(name='KCUr', description='')
    coreMap = rogue.hardware.axi.AxiMemMap('/dev/datadev_0')
    base.add(Top(memBase=coreMap))
    base.start(pollEn=False, initRead=False, zmqPort=None)

    kcu = base.KCU
    if args.hsd:
        # HSD nodes report QSFP receive power
        kcu.I2cBus.selectDevice('QSFP0')
        print(kcu.I2cBus.QSFP0.getRxPwr())
    else:
        print(kcu.TDetTiming.getClkRates())
        print(kcu.TDetSemi.getRTT())

    provider = StaticProvider(__name__)
    pvstats = PVStats('DAQ:LAB2:' + socket.gethostname().replace('-', '_').upper(),
                      kcu, args.hsd)

    # process PVA transactions at the requested cadence
    updatePeriod = args.interval
    with Server(providers=[provider]):
        try:
            pvstats.init()
            while True:
                t0 = time.perf_counter()
                pvstats.update()
                t1 = time.perf_counter()
                # sleep off whatever remains of the update period
                remain = t0 + updatePeriod - t1
                if remain > 0:
                    time.sleep(remain)
        except KeyboardInterrupt:
            pass
initial = self.pos final = op.value() delta = abs(final-initial) op.info("Moving %s -> %s"%(initial, final)) while delta>=1.0: op.info("Moving %s"%delta) delta -= 1.0 cothread.Sleep(1.0) # move at 1 step per second self.pos = final op.done() finally: self.busy = False pv = SharedPV(nt=NTScalar('d'), initial=0.0, handler=MoveHandler()) provider = StaticProvider('move') # 'move' is an arbitrary name provider.add("foo", pv) with Server(providers=[provider]): print('Running') try: cothread.WaitForQuit() except KeyboardInterrupt: pass print('Done')
def main(self):
    """Create the output PVs, attach put handlers, subscribe to the input
    image PV, and serve until SIGINT sets the shutdown event."""
    cli = Context()
    pvs = {}
    # table of detected "features"
    self.features = pvs[args.output + 'features'] = SharedPV(
        nt=NTTable(columns=[
            ('X', 'd'),
            ('Y', 'd'),
            ('W', 'd'),
            ('H', 'd'),
            ('idx', 'd'),
        ]),
        initial=[])
    # output image (example)
    self.imgOut = pvs[args.output + 'img'] = SharedPV(nt=NTNDArray(), initial=np.zeros(
        (0, 0), dtype='u1'))
    # display execution time
    self.execTime = pvs[args.output + 'etime'] = SharedPV(
        nt=NTScalar('d', display=True), initial={
            'value': 0.0,
            'display.units': 's',
        })
    # background threshold level
    bg = pvs[args.output + 'bg'] = SharedPV(nt=NTScalar('I', display=True),
                                            initial={
                                                'value': self.bgLvl,
                                                'display.units': 'px',
                                            })

    @bg.put
    def set_bg(pv, op):
        # clamp to >=1 so the threshold never goes to zero
        self.bgLvl = max(1, int(op.value()))
        pv.post(self.bgLvl)
        op.done()

    # image flattening mode
    imode = pvs[args.output + 'imode'] = SharedPV(
        nt=NTEnum(), initial={'choices': [e.name for e in ImageMode]})

    @imode.put
    def set_imode(pv, op):
        self.imode = ImageMode(op.value())
        pv.post(self.imode)
        op.done()

    # separately publish info of largest feature
    self.X = pvs[args.output + 'x'] = SharedPV(nt=NTScalar('d'), initial=0.0)
    self.Y = pvs[args.output + 'y'] = SharedPV(nt=NTScalar('d'), initial=0.0)
    self.W = pvs[args.output + 'w'] = SharedPV(nt=NTScalar('d'), initial=0.0)
    self.H = pvs[args.output + 'h'] = SharedPV(nt=NTScalar('d'), initial=0.0)

    print("Output PVs", list(pvs.keys()))
    # NOTE(review): PV names above use module-level `args` while the monitor
    # below uses `self.args` — presumably the same namespace; verify.
    # subscribe to input image PV and run local server
    with cli.monitor(self.args.input, self.on_image, request='record[pipeline=true,queueSize=2]'), Server(
            providers=[pvs]):
        # park while work happens in other tasks
        done = threading.Event()
        signal.signal(signal.SIGINT, lambda x, y: done.set())
        done.wait()
if not self.pv.isOpen(): _log.info("Open %s", self.count) self.pv.open(NT.wrap(self.count)) else: _log.info("Tick %s", self.count) self.pv.post(NT.wrap(self.count)) self.count += 1 def onLastDisconnect(self, pv): _log.info("Last client disconnects") # mark in-active, but don't immediately close() self.active = False def put(self, pv, op): # force counter value self.count = op.value().value op.done() pv = SharedPV(handler=LazyCounter()) with Server(providers=[{'foo': pv}]): print('Running') try: cothread.WaitForQuit() except KeyboardInterrupt: pass print('Done')
def main():
    """Host control/status PVs for an XPM, constructing the xtpg PVs a few
    cycles into the run loop (some hardware needs settling time first)."""
    global pvdb
    pvdb = {}  # start with empty dictionary
    global prefix
    prefix = ''

    parser = argparse.ArgumentParser(prog=sys.argv[0], description='host PVs for XPM')
    parser.add_argument('-P', required=True, help='e.g. DAQ:LAB2:XPM:1', metavar='PREFIX')
    parser.add_argument('-v', '--verbose', action='store_true', help='be verbose')
    parser.add_argument('--ip', type=str, required=True, help="IP address")
    parser.add_argument(
        '--db', type=str, default=None,
        help="save/restore db, for example [https://pswww.slac.stanford.edu/ws-auth/devconfigdb/ws/,configDB,LAB2,PROD]")
    parser.add_argument('-I', action='store_true', help='initialize Cu timing')
    parser.add_argument('-L', action='store_true', help='bypass AMC Locks')
    parser.add_argument('-F', type=float, default=1.076923e-6, help='fiducial period (sec)')
    parser.add_argument('-C', type=int, default=200, help='clocks per fiducial')
    args = parser.parse_args()
    if args.verbose:
        setVerbose(True)

    # Build and start the rogue device tree
    base = pr.Root(name='AMCc', description='')
    base.add(Top(
        name='XPM',
        ipAddr=args.ip,
        fidPrescale=args.C,
    ))
    base.start()

    xpm = base.XPM
    app = base.XPM.XpmApp

    # Print the AxiVersion Summary
    xpm.AxiVersion.printStatus()

    provider = StaticProvider(__name__)
    lock = Lock()

    pvstats = PVStats(provider, lock, args.P, xpm, args.F)
    pvctrls = PVCtrls(provider, lock, name=args.P, ip=args.ip, xpm=xpm,
                      stats=pvstats._groups, handle=pvstats.handle, db=args.db,
                      cuInit=args.I, fidPrescale=args.C, fidPeriod=args.F * 1.e9)
    # Constructed lazily at cycle 5 inside the loop below.
    pvxtpg = None

    # process PVA transactions
    updatePeriod = 1.0
    cycle = 0
    with Server(providers=[provider]):
        try:
            # (removed a dead `if pvxtpg is not None: pvxtpg.init()` here:
            # pvxtpg is always None at this point)
            pvstats.init()
            while True:
                prev = time.perf_counter()
                pvstats.update(cycle)
                pvctrls.update(cycle)
                # We have to delay the startup of some classes
                if cycle == 5:
                    pvxtpg = PVXTpg(provider, lock, args.P, xpm, xpm.mmcmParms,
                                    cuMode='xtpg' in xpm.AxiVersion.ImageName.get(),
                                    bypassLock=args.L)
                    pvxtpg.init()
                elif cycle < 5:
                    print('pvxtpg in %d' % (5 - cycle))
                if pvxtpg is not None:
                    pvxtpg.update()
                curr = time.perf_counter()
                # sleep off whatever remains of the update period
                delta = prev + updatePeriod - curr
                if delta > 0:
                    time.sleep(delta)
                cycle += 1
        except KeyboardInterrupt:
            pass
def run(self):
    """Serve `self.provider`, posting each timetool reading to its named PV."""
    with Server(providers=[self.provider]) as self.server:
        # read_timetool() yields dicts mapping PV name -> new value
        for sample in self.read_timetool():
            for pv_name, reading in sample.items():
                self.pvs[pv_name].post(reading)