def __init__(self, provider_name, prefix):
    """Create the PVA provider and the HPS field-configuration PVs.

    Registers <prefix>HPS:FIELDNAMES / FIELDTYPES / FIELDMASK and
    <prefix>PAYLOAD, then calls update() to build the initial payload
    structure from the default mask.
    """
    self.provider = StaticProvider(provider_name)
    self.prefix = prefix
    self.pvs = []

    default_names = ['pid%02x' % i for i in range(31)]
    self.fieldNames = SharedPV(
        initial=NTScalar('as').wrap({'value': default_names}),
        handler=DefaultPVHandler(self))
    # Per-field type code: 'i' (integer) or 'f' (float)
    self.fieldTypes = SharedPV(
        initial=NTScalar('aB').wrap({'value': [ord('i')] * 31}),
        handler=DefaultPVHandler(self))
    self.fieldMask = SharedPV(
        initial=NTScalar('I').wrap({'value': 0x1}),
        handler=DefaultPVHandler(self))
    # Empty structure to start with; update() replaces it.
    self.payload = SharedPV(initial=Value(Type([]), {}),
                            handler=DefaultPVHandler(self))

    for suffix, pv in (('HPS:FIELDNAMES', self.fieldNames),
                       ('HPS:FIELDTYPES', self.fieldTypes),
                       ('HPS:FIELDMASK', self.fieldMask),
                       ('PAYLOAD', self.payload)):
        self.provider.add(prefix + suffix, pv)
    self.update()
def run(self):
    """Build the HPS field-configuration PVs, register them under
    self.prefix, and serve PVA requests until the process exits."""
    self.provider = StaticProvider(__name__)
    self.fieldNames = SharedPV(
        initial=NTScalar('as').wrap(
            {'value': ['pid%02x' % i for i in range(31)]}),
        handler=DefaultPVHandler(self))
    # 'i' (integer) or 'f' (float)
    self.fieldTypes = SharedPV(
        initial=NTScalar('aB').wrap({'value': [ord('i')] * 31}),
        handler=DefaultPVHandler(self))
    self.fieldMask = SharedPV(
        initial=NTScalar('I').wrap({'value': 0x8000}),
        handler=DefaultPVHandler(self))
    self.payload = SharedPV(initial=Value(Type([]), {}),
                            handler=DefaultPVHandler(self))
    print('Hosting {:}HPS:FIELDMASK'.format(self.prefix))
    self.provider.add(self.prefix + 'HPS:FIELDNAMES', self.fieldNames)
    self.provider.add(self.prefix + 'HPS:FIELDTYPES', self.fieldTypes)
    self.provider.add(self.prefix + 'HPS:FIELDMASK', self.fieldMask)
    self.provider.add(self.prefix + 'PAYLOAD', self.payload)
    self.update()
    try:
        Server.forever(providers=[self.provider])
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallows
        # SystemExit/KeyboardInterrupt; trap only real server errors.
        print('Server exited')
def __init__(self, provider_name, *, base, root, pvMap=None, syncRead=True):
    """Attach a PVAccess export to an already-started pyrogue tree.

    Raises if the tree is not running; otherwise creates one PV per
    tree variable (all of them, or only those listed in pvMap).
    """
    self._srv = StaticProvider(provider_name)
    self._root = root
    self._base = base
    self._log = pyrogue.logInit(cls=self)
    self._syncRead = syncRead
    self.pvs = []

    if not root.running:
        raise Exception(
            "Epics can not be setup on a tree which is not started")

    # An explicit pvMap restricts the export; no map means export all.
    doAll = pvMap is None
    self._pvMap = {} if pvMap is None else pvMap

    # Create PVs
    for v in self._root.variableList:
        self._addPv(v, doAll)
def __init__(self):
    """Host a single scalar-double PV for the timetool edge position."""
    self.provider = StaticProvider('ttfex_provider')
    fltpos = SharedPV(nt=NTScalar('d'), initial=0.0)
    self.provider.add('DRP:ATM:CAM:01:Pva:FLTPOS', fltpos)
    self.pvs = {'FLTPOS': fltpos}
def __init__(self, name, comm_addr, export_addr, aggregate=False):
    """Wire up the zmq sockets, the PVA providers, and the server thread.

    export_addr: SUB socket source of exported data/graph messages.
    comm_addr:   REQ socket to the manager (also used by the RPC handler).
    """
    self.base = name

    # zmq plumbing: subscribe to everything on the export stream.
    self.ctx = zmq.Context()
    self.export = self.ctx.socket(zmq.SUB)
    self.export.setsockopt_string(zmq.SUBSCRIBE, "")
    self.export.connect(export_addr)
    self.comm = self.ctx.socket(zmq.REQ)
    self.comm.connect(comm_addr)
    self.queue = ThreadedWorkQueue(maxsize=20, workers=1)

    # pva server provider
    self.provider = StaticProvider(name)
    self.rpc_provider = NTURIDispatcher(
        self.queue,
        target=PvaExportRpcHandler(self.ctx, comm_addr),
        name="{}:cmd".format(self.base),
        prefix="{}:cmd:".format(self.base))
    self.server_thread = threading.Thread(target=self.server,
                                          name='pvaserv')
    self.server_thread.daemon = True

    self.aggregate = aggregate
    self.pvs = {}
    self.ignored = set()
    self.graph_pvbase = "ana"
    self.data_pvbase = "data"
    self.info_pvbase = "info"
    self.cmd_pvs = {'command'}
    self.payload_cmd_pvs = {'add', 'set', 'del'}
class PVCtrls(threading.Thread):
    """Daemon thread serving XpmMini group-control PVs.

    Puts on the registered PVs are dispatched (via PVHandler) to the
    l0*/msg*/master callbacks below, which write XpmMini registers.
    """

    def __init__(self, name, app):
        threading.Thread.__init__(self, daemon=True)
        self._name = name
        self._app = app.XpmMini
        # initialize timestamp
        tnow = datetime.datetime.now()
        t0 = datetime.datetime(1990, 1, 1)  # epics epoch
        ts = int((tnow - t0).total_seconds()) << 32
        app.TPGMiniCore.TStampWr.set(ts)
        app.XpmMini.Pipeline_Depth_Clks.set(95 * 200)
        app.XpmMini.Pipeline_Depth_Fids.set(95)

    def run(self):
        """Register one 'I' scalar PV per control and serve forever."""
        self.provider = StaticProvider(__name__)
        self._pv = []
        self._msgHeader = 0

        def addPV(label, cmd):
            pv = SharedPV(initial=NTScalar('I').wrap(0),
                          handler=PVHandler(cmd))
            name = self._name + ':' + label
            print('Registering {:}'.format(name))
            self.provider.add(name, pv)
            # keep a reference so the handler is not garbage collected
            self._pv.append(pv)

        addPV('GroupL0Reset', self.l0Reset)
        addPV('GroupL0Enable', self.l0Enable)
        addPV('GroupL0Disable', self.l0Disable)
        addPV('GroupMsgInsert', self.msgInsert)
        addPV('PART:0:Master', self.master)
        addPV('PART:0:MsgHeader', self.msgHeader)

        # Serve until process exit; the daemon thread just idles here.
        with Server(providers=[self.provider]):
            while True:
                time.sleep(1)

    def l0Reset(self, pv, val):
        # pulse the reset bit
        self._app.Config_L0Select_Reset.set(1)
        time.sleep(0.01)
        self._app.Config_L0Select_Reset.set(0)

    def l0Enable(self, pv, val):
        self._app.Config_L0Select_Enabled.set(True)

    def l0Disable(self, pv, val):
        self._app.Config_L0Select_Enabled.set(False)

    def msgInsert(self, pv, val):
        if val > 0:
            print('Sending Transition {:}'.format(self._msgHeader))
            self._app.SendTransition(self._msgHeader)

    def msgHeader(self, pv, val):
        # latch the header used by the next msgInsert
        self._msgHeader = val

    def master(self, pv, val):
        self._app.HwEnable.set(True)
class PVAServer(object):
    """Hosts the HPS field-configuration PVs plus a PAYLOAD PV whose
    structure is rebuilt from the current mask/name/type settings."""

    def __init__(self, provider_name, prefix):
        self.provider = StaticProvider(provider_name)
        self.prefix = prefix
        self.pvs = []
        self.fieldNames = SharedPV(initial=NTScalar('as').wrap(
            {'value': ['pid%02x' % i for i in range(31)]}),
            handler=DefaultPVHandler(self))
        # 'i' (integer) or 'f' (float)
        self.fieldTypes = SharedPV(initial=NTScalar('aB').wrap(
            {'value': [ord('i')] * 31}),
            handler=DefaultPVHandler(self))
        self.fieldMask = SharedPV(initial=NTScalar('I').wrap(
            {'value': 0x8000}),
            handler=DefaultPVHandler(self))
        self.payload = SharedPV(initial=Value(Type([]), {}),
                                handler=DefaultPVHandler(self))
        self.provider.add(prefix + 'HPS:FIELDNAMES', self.fieldNames)
        self.provider.add(prefix + 'HPS:FIELDTYPES', self.fieldTypes)
        self.provider.add(prefix + 'HPS:FIELDMASK', self.fieldMask)
        self.provider.add(prefix + 'PAYLOAD', self.payload)
        self.update()

    def update(self):
        """Rebuild the PAYLOAD PV structure from the current field mask.

        One sub-field is added per set bit in FIELDMASK, named and typed
        by FIELDNAMES/FIELDTYPES; a leading 'valid' int is always present.
        """
        mask = self.fieldMask.current().get('value')
        names = self.fieldNames.current().get('value')
        types = self.fieldTypes.current().get('value')
        oid = self.payload.current().getID()
        nid = str(mask)
        # Ensure the new structure ID differs from the old one so clients
        # notice the type change.
        if nid == oid:
            nid += 'a'
        ntypes = []
        nvalues = {}
        ntypes.append(('valid', 'i'))
        nvalues['valid'] = 0
        for i in range(31):
            if mask & 1:
                ntypes.append((names[i], chr(types[i])))
                nvalues[names[i]] = 0
            mask >>= 1
        pvname = self.prefix + 'PAYLOAD'
        # Replace the PAYLOAD PV wholesale so the new Type takes effect.
        self.provider.remove(pvname)
        self.payload = SharedPV(initial=Value(Type(ntypes, id=nid), nvalues),
                                handler=DefaultPVHandler(self))
        print('Payload struct ID %s' % self.payload.current().getID())
        self.provider.add(pvname, self.payload)

    def forever(self):
        """Serve PVA requests until process exit."""
        Server.forever(providers=[self.provider])
def __init__(self, provider_name, prefix):
    """Host one 128-element integer waveform PV named exactly `prefix`."""
    self.provider = StaticProvider(provider_name)
    self.prefix = prefix
    # Monitoring waveform, zero-filled initially.
    initial = NTScalar('ai').wrap({'value': [0] * 128})
    self.image = SharedPV(initial=initial, handler=DefaultPVHandler())
    self.provider.add(prefix, self.image)
class PVAServer(object):
    """Serve pcaspy-style PV definitions over PVAccess."""

    def __init__(self, provider_name):
        self.provider = StaticProvider(provider_name)
        self.pvs = []

    def createPV(self, prefix, pvdefs):
        """
        Takes a string prefix and a dict of pv definitions similar to pcaspy
        and creates PVAccess pv's for them.
        Example PV definitions:
        {'type' : 'int', 'count' : 2, 'value' : [0,0x0fffffff] }
        {'type' : 'float', 'value' : 156.25 }
        """
        for name, pvdef in pvdefs.items():
            logger.debug("Creating PV for %s", prefix + name)
            # Any key outside this set is a specifier we don't implement.
            missing_specs = pvdef.keys() - set(
                ['type', 'count', 'value', 'extra'])
            if missing_specs:
                raise Exception(
                    "Do not have support for specifier {0} as of yet".format(
                        ",".join(missing_specs)))
            try:
                tp = __pcastypes2p4ptype__[pvdef['type']]
                starting_val = pvdef.get(
                    'value', __pcastypes2startingval__[pvdef['type']])
                if pvdef.get('count', 1) > 1:
                    # Array PV: prefix the type code, replicate the default.
                    tp = 'a' + tp
                    starting_val = pvdef.get(
                        'value',
                        [__pcastypes2startingval__[pvdef['type']]] *
                        pvdef['count'])
                init_val = {"value": starting_val}
                extra_defs = []
                if 'extra' in pvdef:
                    init_val.update(
                        {fn: fv for (fn, _, fv) in pvdef['extra']})
                    extra_defs = [(fn, __pcastypes2p4ptype__[ft])
                                  for (fn, ft, _) in pvdef['extra']]
                logger.debug("NTScalar(%s, extra=%s).wrap(%s)", tp,
                             extra_defs, init_val)
                pv = SharedPV(
                    initial=NTScalar(tp, extra=extra_defs).wrap(init_val),
                    handler=DefaultPVHandler())
            except Exception:
                # BUG FIX: was a bare 'except:'. Keep the best-effort
                # fallback (raw Value from the literal type/value), but no
                # longer swallow SystemExit/KeyboardInterrupt.
                pv = SharedPV(initial=Value(Type(pvdef['type']),
                                            pvdef['value']),
                              handler=DefaultPVHandler())
            self.provider.add(prefix + name, pv)
            # we must keep a reference in order to keep the Handler from
            # being collected
            self.pvs.append(pv)
            logger.debug("Created PV for %s", prefix + name)

    def forever(self):
        """Serve PVA requests until process exit."""
        Server.forever(providers=[self.provider])
class PVAServer(object):
    """Serve a single 128-element integer waveform PV."""

    def __init__(self, provider_name, prefix):
        self.provider = StaticProvider(provider_name)
        self.prefix = prefix
        # Monitoring
        initial = NTScalar('ai').wrap({'value': [0] * 128})
        self.image = SharedPV(initial=initial, handler=DefaultPVHandler())
        self.provider.add(prefix, self.image)

    def forever(self):
        """Run the PVA server until process exit."""
        Server.forever(providers=[self.provider])
def main(args):
    """Serve one int mailbox PV for each requested name."""
    provider = StaticProvider('mailbox')  # 'mailbox' is an arbitrary name
    # we must keep a reference in order to keep the Handler from being
    # collected
    pvs = []
    for name in args.name:
        mailbox = SharedPV(initial=types['int'], handler=MailboxHandler())
        provider.add(name, mailbox)
        pvs.append(mailbox)
    Server.forever(providers=[provider])
    print('Done')
class PVAServer:
    """Publish the timetool edge position from psana shared memory."""

    def __init__(self):
        self.provider = StaticProvider('ttfex_provider')
        self.pvs = {}
        pv = SharedPV(nt=NTScalar('d'), initial=0.0)
        self.provider.add('DRP:ATM:CAM:01:Pva:FLTPOS', pv)
        self.pvs['FLTPOS'] = pv

    def read_timetool(self):
        """Yield {pv-name: value} dicts from the shared-memory datasource."""
        while True:
            ds = psana.DataSource(shmem='tmo')
            for run in ds.runs():
                det = run.Detector('tmoopal2')
                for evt in run.events():
                    time.sleep(2.0)
                    # BUG FIX: this was the set literal
                    # {'FLTPOS', det.ttfex.fltpos(evt)}; run() iterates
                    # data.items(), which requires a dict.
                    yield {'FLTPOS': det.ttfex.fltpos(evt)}

    def run(self):
        """Post each yielded value to its PV while the server runs."""
        with Server(providers=[self.provider]) as self.server:
            for data in self.read_timetool():
                for name, value in data.items():
                    self.pvs[name].post(value)
def run(self):
    """Thread body: register the group-control PVs and serve forever."""
    self.provider = StaticProvider(__name__)
    self._pv = []
    self._msgHeader = 0

    def addPV(label, cmd):
        # One 'I' scalar PV per control; PVHandler routes puts to cmd.
        pv = SharedPV(initial=NTScalar('I').wrap(0),
                      handler=PVHandler(cmd))
        name = self._name + ':' + label
        print('Registering {:}'.format(name))
        self.provider.add(name, pv)
        # keep a reference so the handler is not garbage collected
        self._pv.append(pv)

    addPV('GroupL0Reset', self.l0Reset)
    addPV('GroupL0Enable', self.l0Enable)
    addPV('GroupL0Disable', self.l0Disable)
    addPV('GroupMsgInsert', self.msgInsert)
    addPV('PART:0:Master', self.master)
    addPV('PART:0:MsgHeader', self.msgHeader)

    # Serve until process exit; this thread just idles inside the context.
    with Server(providers=[self.provider]):
        while True:
            time.sleep(1)
from p4p.server import Server, StaticProvider

from . import v4l, color


def getargs():
    """Parse command line: a V4L2 device path and the PV name to serve."""
    from argparse import ArgumentParser
    P = ArgumentParser()
    # BUG FIX: help text was missing its closing parenthesis.
    P.add_argument('video', help='A V4L2 device (eg. /dev/video0)')
    P.add_argument('pvname')
    return P


args = getargs().parse_args()

# Start with an empty 0x0 u1 image; frames replace it once captured.
pv = SharedPV(nt=NTNDArray(), initial=numpy.zeros((0, 0), dtype='u1'))
provider = StaticProvider('capture')
provider.add(args.pvname, pv)

# open the capture device, and run the Server
with open(args.video, 'r+b', 0) as F, Server(providers=[provider]):
    caps = v4l.query_capabilities(F.fileno())
    print('capabilities', caps)

    if 'VIDEO_CAPTURE' not in caps['capabilities']:
        print("Not a capture device")
        sys.exit(1)

    idx = -1
    for fmt in v4l.list_formats(F.fileno()):
        print('Supported:', fmt)
def main():
    """Host the XPM status/control PVs and run the 1 Hz update loop."""
    global pvdb
    pvdb = {}  # start with empty dictionary
    global prefix
    prefix = ''

    parser = argparse.ArgumentParser(prog=sys.argv[0],
                                     description='host PVs for XPM')
    parser.add_argument('-P', required=True, help='e.g. DAQ:LAB2:XPM:1',
                        metavar='PREFIX')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='be verbose')
    parser.add_argument('--ip', type=str, required=True, help="IP address")
    parser.add_argument('--db', type=str, default=None,
                        help="save/restore db, for example [https://pswww.slac.stanford.edu/ws-auth/devconfigdb/ws/,configDB,LAB2,PROD]")
    args = parser.parse_args()
    if args.verbose:
        # logging.basicConfig(level=logging.DEBUG)
        setVerbose(True)

    # Set base
    base = pr.Root(name='AMCc', description='')
    base.add(Top(
        name='XPM',
        ipAddr=args.ip,
    ))

    # Start the system
    base.start(
        pollEn=False,
        initRead=False,
        zmqPort=None,
    )

    xpm = base.XPM
    app = base.XPM.XpmApp

    # Print the AxiVersion Summary
    xpm.AxiVersion.printStatus()

    provider = StaticProvider(__name__)
    lock = Lock()

    pvstats = PVStats(provider, lock, args.P, xpm)
    pvctrls = PVCtrls(provider, lock, name=args.P, ip=args.ip, xpm=xpm,
                      stats=pvstats._groups, db=args.db)
    pvxtpg = PVXTpg(provider, lock, args.P, xpm, xpm.mmcmParms,
                    cuMode='xtpg' in xpm.AxiVersion.ImageName.get())

    # process PVA transactions
    updatePeriod = 1.0
    with Server(providers=[provider]):
        try:
            if pvxtpg is not None:
                pvxtpg.init()
            pvstats.init()
            while True:
                prev = time.perf_counter()
                if pvxtpg is not None:
                    pvxtpg.update()
                pvstats.update()
                pvctrls.update()
                curr = time.perf_counter()
                # Sleep for the remainder of the update period.
                delta = prev + updatePeriod - curr
                # print('Delta {:.2f} Update {:.2f} curr {:.2f} prev {:.2f}'.format(delta,curr-prev,curr,prev))
                if delta > 0:
                    time.sleep(delta)
        except KeyboardInterrupt:
            pass
def __init__(self, provider_name):
    """Create an empty provider; PVs are registered later."""
    self.pvs = []
    self.provider = StaticProvider(provider_name)
class PVAServer(object):
    """Export a running pyrogue tree's variables/commands as PVAccess PVs."""

    def __init__(self, provider_name, *, base, root, pvMap=None,
                 syncRead=True):
        self._srv = StaticProvider(provider_name)
        self._root = root
        self._base = base
        self._log = pyrogue.logInit(cls=self)
        self._syncRead = syncRead
        self.pvs = []
        if not root.running:
            raise Exception(
                "Epics can not be setup on a tree which is not started")
        if pvMap is None:
            doAll = True
            self._pvMap = {}
        else:
            doAll = False
            self._pvMap = pvMap
        # Create PVs
        for v in self._root.variableList:
            self._addPv(v, doAll)

    def _addPv(self, node, doAll):
        """Register one tree node, deriving the PV name from its path."""
        eName = self._base + ':'
        if doAll:
            eName += node.path.replace('.', ':')
            self._pvMap[node.path] = eName
        elif node.path in self._pvMap:
            eName = self._pvMap[node.path]
        else:
            return
        if isinstance(node, pyrogue.BaseCommand):
            # NOTE(review): StaticProvider.add normally takes (name, pv);
            # this one-argument call looks suspect — confirm the provider
            # API in use.
            self._srv.add(self.createPV(eName, node))
            self._log.info("Adding command {} mapped to {}".format(
                node.path, eName))
        else:
            # Add standard variable
            evar = self.createPV(eName, node)
            # node.addListener(evar.varUpdated)
            self._srv.add(evar)
            self._log.info("Adding variable {} mapped to {}".format(
                node.path, eName))

    def createPV(self, pvname, node):
        """
        Takes a string prefix and a dict of pv definitions similar to pcaspy
        and creates PVAccess pv's for them.
        Example PV definitions:
        {'type' : 'int', 'count' : 2, 'value' : [0,0x0fffffff] }
        {'type' : 'float', 'value' : 156.25 }
        """
        # TODO(review): pvdef is always empty here, so the NTScalar path
        # always fails and the fallback raises KeyError('type'); presumably
        # pvdef should be derived from `node` — confirm intended mapping.
        pvdef = {}
        logger.debug("Creating PV for %s", pvname)
        missing_specs = pvdef.keys() - set(['type', 'count', 'value',
                                            'extra'])
        if missing_specs:
            raise Exception(
                "Do not have support for specifier {0} as of yet".format(
                    ",".join(missing_specs)))
        try:
            tp = __pcastypes2p4ptype__[pvdef['type']]
            starting_val = pvdef.get('value',
                                     __pcastypes2startingval__[pvdef['type']])
            if pvdef.get('count', 1) > 1:
                tp = 'a' + tp
                starting_val = pvdef.get(
                    'value',
                    [__pcastypes2startingval__[pvdef['type']]] *
                    pvdef['count'])
            init_val = {"value": starting_val}
            extra_defs = []
            if 'extra' in pvdef:
                init_val.update({fn: fv for (fn, _, fv) in pvdef['extra']})
                extra_defs = [(fn, __pcastypes2p4ptype__[ft])
                              for (fn, ft, _) in pvdef['extra']]
            logger.debug("NTScalar(%s, extra=%s).wrap(%s)", tp, extra_defs,
                         init_val)
            pv = SharedPV(initial=NTScalar(tp,
                                           extra=extra_defs).wrap(init_val),
                          handler=DefaultPVHandler())
        except Exception:
            # BUG FIX: was a bare 'except:'; keep the fallback but stop
            # swallowing SystemExit/KeyboardInterrupt.
            pv = SharedPV(initial=Value(Type(pvdef['type']), pvdef['value']),
                          handler=DefaultPVHandler())
        # we must keep a reference in order to keep the Handler from being
        # collected
        self.pvs.append(pv)
        logger.debug("Created PV for %s", pvname)
        return pv

    def forever(self):
        """Serve PVA requests until process exit.

        BUG FIX: this used self.provider, which is never assigned
        (__init__ stores the provider in self._srv), so calling it raised
        AttributeError.
        """
        Server.forever(providers=[self._srv])
def __init__(self, provider_name, prefix):
    """One provider hosting two ChipServer instances (suffix :A and :B)."""
    provider = StaticProvider(provider_name)
    self.provider = provider
    self.a = ChipServer(provider, prefix + ':A')
    self.b = ChipServer(provider, prefix + ':B')
class PVCtrls(threading.Thread):
    """Daemon thread hosting the HPS field-configuration PVs and a PAYLOAD
    PV whose structure tracks the current channel mask."""

    def __init__(self, prefix, app):
        threading.Thread.__init__(self, daemon=True)
        self.prefix = prefix + ':'
        self.app = app

    def run(self):
        """Create and register the PVs, then serve until process exit."""
        self.provider = StaticProvider(__name__)
        self.fieldNames = SharedPV(initial=NTScalar('as').wrap(
            {'value': ['pid%02x' % i for i in range(31)]}),
            handler=DefaultPVHandler(self))
        # 'i' (integer) or 'f' (float)
        self.fieldTypes = SharedPV(initial=NTScalar('aB').wrap(
            {'value': [ord('i')] * 31}),
            handler=DefaultPVHandler(self))
        self.fieldMask = SharedPV(initial=NTScalar('I').wrap(
            {'value': 0x8000}),
            handler=DefaultPVHandler(self))
        self.payload = SharedPV(initial=Value(Type([]), {}),
                                handler=DefaultPVHandler(self))
        print('Hosting {:}HPS:FIELDMASK'.format(self.prefix))
        self.provider.add(self.prefix + 'HPS:FIELDNAMES', self.fieldNames)
        self.provider.add(self.prefix + 'HPS:FIELDTYPES', self.fieldTypes)
        self.provider.add(self.prefix + 'HPS:FIELDMASK', self.fieldMask)
        self.provider.add(self.prefix + 'PAYLOAD', self.payload)
        self.update()
        try:
            Server.forever(providers=[self.provider])
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallows
            # SystemExit/KeyboardInterrupt during shutdown.
            print('Server exited')

    def update(self):
        """Rebuild PAYLOAD from the current mask and push it to hardware."""
        self.app.Enable.set(0)
        mask = self.fieldMask.current().get('value')
        names = self.fieldNames.current().get('value')
        types = self.fieldTypes.current().get('value')
        oid = self.payload.current().getID()
        nid = str(mask)
        print('PVCtrls.update mask[{:x}] oid[{:}]'.format(mask, oid))
        # Force a different structure ID so clients notice the type change.
        if nid == oid:
            nid += 'a'
        ntypes = [('valid', 'i')]
        nvalues = {'valid': 0}
        mmask = mask  # shift a copy; 'mask' is still needed below
        for i in range(31):
            if mmask & 1:
                ntypes.append((names[i], chr(types[i])))
                nvalues[names[i]] = 0
            mmask >>= 1
        pvname = self.prefix + 'PAYLOAD'
        self.provider.remove(pvname)
        self.payload = SharedPV(initial=Value(Type(ntypes, id=nid), nvalues),
                                handler=DefaultPVHandler(self))
        print('Payload struct ID %s' % self.payload.current().getID())
        self.provider.add(pvname, self.payload)
        if mask:
            self.app.channelMask.set(mask)
            self.app.Enable.set(1)
def main():
    """Host XPM status/control PVs; PVXTpg startup is delayed 5 cycles."""
    global pvdb
    pvdb = {}  # start with empty dictionary
    global prefix
    prefix = ''

    parser = argparse.ArgumentParser(prog=sys.argv[0],
                                     description='host PVs for XPM')
    parser.add_argument('-P', required=True, help='e.g. DAQ:LAB2:XPM:1',
                        metavar='PREFIX')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='be verbose')
    parser.add_argument('--ip', type=str, required=True, help="IP address")
    parser.add_argument(
        '--db', type=str, default=None,
        help=
        "save/restore db, for example [https://pswww.slac.stanford.edu/ws-auth/devconfigdb/ws/,configDB,LAB2,PROD]"
    )
    parser.add_argument('-I', action='store_true',
                        help='initialize Cu timing')
    parser.add_argument('-L', action='store_true', help='bypass AMC Locks')
    parser.add_argument('-F', type=float, default=1.076923e-6,
                        help='fiducial period (sec)')
    parser.add_argument('-C', type=int, default=200,
                        help='clocks per fiducial')
    args = parser.parse_args()
    if args.verbose:
        # logging.basicConfig(level=logging.DEBUG)
        setVerbose(True)

    # Set base
    base = pr.Root(name='AMCc', description='')
    base.add(Top(
        name='XPM',
        ipAddr=args.ip,
        fidPrescale=args.C,
    ))

    # Start the system
    base.start(
        # pollEn = False,
        # initRead = False,
        # zmqPort = None,
    )

    xpm = base.XPM
    app = base.XPM.XpmApp

    # Print the AxiVersion Summary
    xpm.AxiVersion.printStatus()

    provider = StaticProvider(__name__)
    lock = Lock()

    pvstats = PVStats(provider, lock, args.P, xpm, args.F)
    # pvctrls = PVCtrls(provider, lock, name=args.P, ip=args.ip, xpm=xpm, stats=pvstats._groups, handle=pvstats.handle, db=args.db, cuInit=True)
    pvctrls = PVCtrls(provider, lock, name=args.P, ip=args.ip, xpm=xpm,
                      stats=pvstats._groups, handle=pvstats.handle,
                      db=args.db, cuInit=args.I, fidPrescale=args.C,
                      fidPeriod=args.F * 1.e9)
    # pvxtpg is created later (cycle 5) inside the loop below.
    pvxtpg = None

    # process PVA transactions
    updatePeriod = 1.0
    cycle = 0
    with Server(providers=[provider]):
        try:
            if pvxtpg is not None:
                pvxtpg.init()
            pvstats.init()
            while True:
                prev = time.perf_counter()
                pvstats.update(cycle)
                pvctrls.update(cycle)
                # We have to delay the startup of some classes
                if cycle == 5:
                    pvxtpg = PVXTpg(provider, lock, args.P, xpm,
                                    xpm.mmcmParms,
                                    cuMode='xtpg' in
                                    xpm.AxiVersion.ImageName.get(),
                                    bypassLock=args.L)
                    pvxtpg.init()
                elif cycle < 5:
                    print('pvxtpg in %d' % (5 - cycle))
                if pvxtpg is not None:
                    pvxtpg.update()
                curr = time.perf_counter()
                # Sleep for the remainder of the update period.
                delta = prev + updatePeriod - curr
                # print('Delta {:.2f} Update {:.2f} curr {:.2f} prev {:.2f}'.format(delta,curr-prev,curr,prev))
                if delta > 0:
                    time.sleep(delta)
                cycle += 1
        except KeyboardInterrupt:
            pass
def main():
    """Host KCU monitoring PVs and update them on a fixed interval."""
    global pvdb
    pvdb = {}  # start with empty dictionary
    global prefix
    prefix = ''
    global provider

    parser = argparse.ArgumentParser(prog=sys.argv[0],
                                     description='host PVs for KCU')
    parser.add_argument('-i', '--interval', type=int,
                        help='PV update interval', default=10)
    parser.add_argument('-H', '--hsd', action='store_true', help='HSD node',
                        default=False)
    args = parser.parse_args()

    # Set base
    base = pr.Root(name='KCUr', description='')
    coreMap = rogue.hardware.axi.AxiMemMap('/dev/datadev_0')
    base.add(Top(memBase=coreMap))

    # Start the system
    base.start(
        pollEn=False,
        initRead=False,
        zmqPort=None,
    )

    kcu = base.KCU

    if args.hsd:
        kcu.I2cBus.selectDevice('QSFP0')
        print(kcu.I2cBus.QSFP0.getRxPwr())
    else:
        print(kcu.TDetTiming.getClkRates())
        print(kcu.TDetSemi.getRTT())

    # NOTE(review): 'provider' is a module global — presumably PVStats
    # registers its PVs through it; confirm in the PVStats implementation.
    provider = StaticProvider(__name__)

    pvstats = PVStats(
        'DAQ:LAB2:' + socket.gethostname().replace('-', '_').upper(), kcu,
        args.hsd)

    # process PVA transactions
    updatePeriod = args.interval
    with Server(providers=[provider]):
        try:
            pvstats.init()
            while True:
                prev = time.perf_counter()
                pvstats.update()
                curr = time.perf_counter()
                # Sleep for the remainder of the update period.
                delta = prev + updatePeriod - curr
                # print('Delta {:.2f} Update {:.2f} curr {:.2f} prev {:.2f}'.format(delta,curr-prev,curr,prev))
                if delta > 0:
                    time.sleep(delta)
        except KeyboardInterrupt:
            pass
prefix = sys.argv[1]

list_type = NTScalar('as')

# Default initial value for each supported mailbox element type.
types = {
    'int': NTScalar('i').wrap(0),
    'float': NTScalar('d').wrap(0.0),
    'str': NTScalar('s').wrap(''),
    'enum': NTEnum().wrap(0),
}

pvs_lock = Lock()
pvs = {}

provider = StaticProvider('dynamicbox')


class MailboxHandler(object):
    """Accept any put by re-posting the written value."""

    def put(self, pv, op):
        pv.post(op.value())
        op.done()


addpv = SharedPV(initial=NTScalar('s').wrap('Only RPC'))
delpv = SharedPV(initial=NTScalar('s').wrap('Only RPC'))
listpv = SharedPV(nt=list_type, initial=[])

for _suffix, _pv in (("add", addpv), ("del", delpv), ("list", listpv)):
    provider.add(prefix + _suffix, _pv)
# NOTE(review): this snippet begins mid-method — the opening of the
# enclosing put() handler (and the 'try:' matching the 'finally:' below)
# lies outside this chunk; indentation here is reconstructed.
            initial = self.pos
            final = op.value()
            delta = abs(final - initial)
            op.info("Moving %s -> %s" % (initial, final))
            # Step toward the target one unit per second, reporting progress.
            while delta >= 1.0:
                op.info("Moving %s" % delta)
                delta -= 1.0
                cothread.Sleep(1.0)  # move at 1 step per second
            self.pos = final
            op.done()
        finally:
            # always clear the busy flag, even if the move was interrupted
            self.busy = False


pv = SharedPV(nt=NTScalar('d'), initial=0.0, handler=MoveHandler())
provider = StaticProvider('move')  # 'move' is an arbitrary name
provider.add("foo", pv)

# Serve until the user quits.
with Server(providers=[provider]):
    print('Running')
    try:
        cothread.WaitForQuit()
    except KeyboardInterrupt:
        pass
print('Done')
# NOTE(review): this snippet begins mid-method — the 'if' branch matching
# the 'else:' below (the periodic tick) lies outside this chunk;
# indentation here is reconstructed.
        else:
            _log.info("Tick %s", self.count)
            self.pv.post(NT.wrap(self.count))
            self.count += 1

    def onLastDisconnect(self, pv):
        _log.info("Last client disconnects")
        # mark in-active, but don't immediately close()
        self.active = False

    def put(self, pv, op):
        # force counter value
        self.count = op.value().value
        op.done()


pv = SharedPV(handler=LazyCounter())
provider = StaticProvider('lazy')  # 'lazy' is an arbitrary name
provider.add("foo", pv)

# Serve until the user quits.
with Server(providers=[provider]):
    print('Running')
    try:
        cothread.WaitForQuit()
    except KeyboardInterrupt:
        pass
print('Done')
def __init__(self, provider_name, prefix, start):
    """Create the provider and the single ChipServer it hosts."""
    provider = StaticProvider(provider_name)
    self.provider = provider
    self.chip = ChipServer(provider, prefix, start)
class PvaExportServer:
    """Bridge exported analysis data from zmq to PVAccess PVs.

    Subscribes to an export stream (SUB socket) and mirrors graph/store/
    data/info messages into dynamically-created PVs under `name:`.
    """

    def __init__(self, name, comm_addr, export_addr, aggregate=False):
        self.base = name
        self.ctx = zmq.Context()
        self.export = self.ctx.socket(zmq.SUB)
        self.export.setsockopt_string(zmq.SUBSCRIBE, "")
        self.export.connect(export_addr)
        self.comm = self.ctx.socket(zmq.REQ)
        self.comm.connect(comm_addr)
        self.queue = ThreadedWorkQueue(maxsize=20, workers=1)
        # pva server provider
        self.provider = StaticProvider(name)
        self.rpc_provider = NTURIDispatcher(self.queue,
                                            target=PvaExportRpcHandler(
                                                self.ctx, comm_addr),
                                            name="%s:cmd" % self.base,
                                            prefix="%s:cmd:" % self.base)
        self.server_thread = threading.Thread(target=self.server,
                                              name='pvaserv')
        self.server_thread.daemon = True
        self.aggregate = aggregate
        self.pvs = {}          # pv name -> SharedPV
        self.ignored = set()   # pv names we failed to type-map
        self.graph_pvbase = "ana"
        self.data_pvbase = "data"
        self.info_pvbase = "info"
        self.cmd_pvs = {'command'}
        self.payload_cmd_pvs = {'add', 'set', 'del'}

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        # tears down the zmq context (and with it, the sockets)
        self.ctx.destroy()

    @staticmethod
    def join_pv(*args):
        """Join PV name components with ':'."""
        return ":".join(args)

    def graph_pvname(self, graph, name=None):
        """PV name for a graph, or for a named item within it."""
        if name is not None:
            return ":".join([self.graph_pvbase, graph, name])
        else:
            return ":".join([self.graph_pvbase, graph])

    def data_pvname(self, graph, name):
        """PV name for a data item exported by a graph."""
        return ":".join([self.graph_pvbase, graph, self.data_pvbase, name])

    def info_pvname(self, name):
        """PV name for a global info item."""
        return ":".join([self.info_pvbase, name])

    def find_graph_pvnames(self, graph, names):
        """Return the subset of names belonging to the given graph."""
        return [
            name for name in names
            if name.startswith(self.graph_pvname(graph))
        ]

    def create_pv(self, name, nt, initial, func=None):
        """Create and register a PV; optional func handles client puts."""
        if func is not None:
            pv = SharedPV(nt=nt,
                          initial=initial,
                          handler=PvaExportPutHandler(put=func))
        else:
            pv = SharedPV(nt=nt, initial=initial)
        self.provider.add('%s:%s' % (self.base, name), pv)
        self.pvs[name] = pv

    def create_bytes_pv(self, name, initial, func=None):
        """Convenience wrapper: create an NTBytes PV."""
        self.create_pv(name, NTBytes(), initial, func=func)

    def valid(self, name, group=None):
        # names starting with '_' are private and not exported
        return not name.startswith('_')

    def get_pv_type(self, data):
        """Map a Python value to the normative type used to publish it."""
        if isinstance(data, np.ndarray):
            return NTNDArray()
        elif isinstance(data, bool):
            return NTScalar('?')
        elif isinstance(data, int):
            return NTScalar('l')
        elif isinstance(data, float):
            return NTScalar('d')
        else:
            return NTObject()

    def update_graph(self, graph, data):
        """Publish graph metadata, per-field and (optionally) aggregated."""
        # add the unaggregated version of the pvs
        for key, value in data.items():
            if key in NTGraph.flat_schema:
                name, nttype = NTGraph.flat_schema[key]
                pvname = self.graph_pvname(graph, name)
                if pvname not in self.pvs:
                    self.create_pv(pvname, nttype, value)
                else:
                    self.pvs[pvname].post(value)
        # add the aggregated graph pv if requested
        if self.aggregate:
            pvname = self.graph_pvname(graph)
            if pvname not in self.pvs:
                logger.debug("Creating pv for info on the graph")
                self.create_pv(pvname, NTGraph(), data)
            else:
                self.pvs[pvname].post(data)

    def update_store(self, graph, data):
        """Publish store metadata, per-field and (optionally) aggregated."""
        # add the unaggregated version of the pvs
        for key, value in data.items():
            if key in NTStore.flat_schema:
                name, nttype = NTStore.flat_schema[key]
                pvname = self.graph_pvname(graph, name)
                if pvname not in self.pvs:
                    self.create_pv(pvname, nttype, value)
                else:
                    self.pvs[pvname].post(value)
        # add the aggregated graph pv if requested
        if self.aggregate:
            pvname = self.graph_pvname(graph, 'store')
            if pvname not in self.pvs:
                logger.debug("Creating pv for info on the store")
                self.create_pv(pvname, NTStore(), data)
            else:
                self.pvs[pvname].post(data)

    def update_heartbeat(self, graph, heartbeat):
        """Publish the graph's heartbeat counter."""
        pvname = self.graph_pvname(graph, 'heartbeat')
        if pvname not in self.pvs:
            self.create_pv(pvname, NTScalar('d'), heartbeat)
        else:
            self.pvs[pvname].post(heartbeat)

    def update_info(self, data):
        """Publish global info entries as string-array PVs."""
        # add the unaggregated version of the pvs
        for key, value in data.items():
            pvname = self.info_pvname(key)
            if pvname not in self.pvs:
                self.create_pv(pvname, NTScalar('as'), value)
            else:
                self.pvs[pvname].post(value)

    def update_data(self, graph, name, data):
        """Publish one exported data item, creating its PV on first sight.

        Items whose type can't be mapped are remembered in self.ignored so
        the warning is only emitted once.
        """
        pvname = self.data_pvname(graph, name)
        if pvname not in self.ignored:
            if pvname not in self.pvs:
                pv_type = self.get_pv_type(data)
                if pv_type is not None:
                    logger.debug("Creating new pv named %s for graph %s",
                                 name, graph)
                    self.create_pv(pvname, pv_type, data)
                else:
                    logger.warn(
                        "Cannot map type of '%s' from graph '%s' to PV: %s",
                        name, graph, type(data))
                    self.ignored.add(pvname)
            else:
                self.pvs[pvname].post(data)

    def update_destroy(self, graph):
        """Remove all PVs (and ignore entries) belonging to a purged graph."""
        # close all the pvs associated with the purged graph
        for name in self.find_graph_pvnames(graph, self.pvs):
            logger.debug("Removing pv named %s for graph %s", name, graph)
            self.provider.remove('%s:%s' % (self.base, name))
            del self.pvs[name]
        # remove any ignored pvs associated with the purged graph
        for name in self.find_graph_pvnames(graph, self.ignored):
            self.ignored.remove(name)

    def server(self):
        """Thread body: run the PVA server plus the RPC work queue."""
        server = Server(providers=[self.provider, self.rpc_provider])
        with server, self.queue:
            try:
                while True:
                    time.sleep(100)
            except KeyboardInterrupt:
                pass

    def run(self):
        """Main loop: dispatch each export message to its update_* handler."""
        # start the pva server thread
        self.server_thread.start()
        logger.info("Starting PVA data export server")
        while True:
            topic = self.export.recv_string()
            graph = self.export.recv_string()
            exports = self.export.recv_pyobj()
            if topic == 'data':
                for name, data in exports.items():
                    # ignore names starting with '_' - these are private
                    if self.valid(name):
                        self.update_data(graph, name, data)
            elif topic == 'graph':
                self.update_graph(graph, exports)
            elif topic == 'store':
                self.update_store(graph, exports)
            elif topic == 'heartbeat':
                self.update_heartbeat(graph, exports)
            elif topic == 'info':
                self.update_info(exports)
            elif topic == 'destroy':
                self.update_destroy(graph)
            else:
                logger.warn("No handler for topic: %s", topic)