def run(self):
    self.provider = StaticProvider(__name__)
    self.fieldNames = SharedPV(initial=NTScalar('as').wrap(
        {'value': ['pid%02x' % i for i in range(31)]}),
        handler=DefaultPVHandler(self))
    # 'i' (integer) or 'f' (float)
    self.fieldTypes = SharedPV(initial=NTScalar('aB').wrap(
        {'value': [ord('i')] * 31}),
        handler=DefaultPVHandler(self))
    self.fieldMask = SharedPV(initial=NTScalar('I').wrap({'value': 0x8000}),
                              handler=DefaultPVHandler(self))
    self.payload = SharedPV(initial=Value(Type([]), {}),
                            handler=DefaultPVHandler(self))
    print('Hosting {:}HPS:FIELDMASK'.format(self.prefix))
    self.provider.add(self.prefix + 'HPS:FIELDNAMES', self.fieldNames)
    self.provider.add(self.prefix + 'HPS:FIELDTYPES', self.fieldTypes)
    self.provider.add(self.prefix + 'HPS:FIELDMASK', self.fieldMask)
    self.provider.add(self.prefix + 'PAYLOAD', self.payload)
    self.update()
    try:
        Server.forever(providers=[self.provider])
    except:
        print('Server exited')

def __init__(self, provider, prefix):
    self.provider = provider
    self.prefix = prefix

    # Make configuration one PV access for each readout channel
    self.daqConfig = MySharedPV(daqConfig, self.updateDaqConfig)
    self.ready = SharedPV(initial=NTScalar('I').wrap({'value': 0}),
                          handler=DefaultPVHandler())

    self.provider.add(prefix + ':CONFIG', self.daqConfig)
    self.provider.add(prefix + ':READY', self.ready)

    # Monitoring
    self.fwBuild = SharedPV(initial=NTScalar('s').wrap({'value': ''}),
                            handler=DefaultPVHandler())
    self.monTiming = MySharedPV(monTiming)
    self.monPgp = MySharedPV(monPgp)
    self.monRawBuf = MySharedPV(monBuf)
    self.monFexBuf = MySharedPV(monBuf)
    self.monRawDet = MySharedPV(monBufDetail)
    self.monEnv = MySharedPV(monEnv)
    self.monAdc = MySharedPV(monAdc)
    self.monJesd = MySharedPV(monJesd)
    self.monJesdTtl = MySharedPV(monJesdTtl)

    self.provider.add(prefix + ':FWBUILD', self.fwBuild)
    self.provider.add(prefix + ':MONTIMING', self.monTiming)
    self.provider.add(prefix + ':MONPGP', self.monPgp)
    self.provider.add(prefix + ':MONRAWBUF', self.monRawBuf)
    self.provider.add(prefix + ':MONFEXBUF', self.monFexBuf)
    self.provider.add(prefix + ':MONRAWDET', self.monRawDet)
    self.provider.add(prefix + ':MONENV', self.monEnv)
    self.provider.add(prefix + ':MONADC', self.monAdc)
    self.provider.add(prefix + ':MONJESD', self.monJesd)
    self.provider.add(prefix + ':MONJESDTTL', self.monJesdTtl)

def __init__(self, provider, name, idx, dev, chd, cuMode=False):
    self._dev = dev
    self._chd = chd
    self._cuMode = cuMode

    # The original wrote [0 * 2049], which evaluates to [0] (a single zero);
    # an array of 2049 zeros was almost certainly intended.
    pv = SharedPV(initial=NTScalar('aI').wrap([0] * 2049),
                  handler=DefaultPVHandler())
    provider.add(name + ':MMCM%d' % idx, pv)
    self._pv = pv

    pv = SharedPV(initial=NTScalar('aI').wrap([0] * 2049),
                  handler=DefaultPVHandler())
    provider.add(name + ':IMMCM%d' % idx, pv)
    self._ipv = pv
    # self.update()

    pv = SharedPV(initial=NTScalar('I').wrap(0),
                  handler=PVHandler(self.set))
    provider.add(name + ':SetMmcm%d' % idx, pv)
    self._set = pv

    pv = SharedPV(initial=NTScalar('I').wrap(0),
                  handler=self)
    provider.add(name + ':ResetMmcm%d' % idx, pv)
    self._reset = pv

def __init__(self, provider_name, prefix):
    self.provider = StaticProvider(provider_name)
    self.prefix = prefix
    self.pvs = []
    self.fieldNames = SharedPV(initial=NTScalar('as').wrap(
        {'value': ['pid%02x' % i for i in range(31)]}),
        handler=DefaultPVHandler(self))
    # 'i' (integer) or 'f' (float)
    self.fieldTypes = SharedPV(initial=NTScalar('aB').wrap(
        {'value': [ord('i')] * 31}),
        handler=DefaultPVHandler(self))
    self.fieldMask = SharedPV(initial=NTScalar('I').wrap({'value': 0x1}),
                              handler=DefaultPVHandler(self))
    self.payload = SharedPV(initial=Value(Type([]), {}),
                            handler=DefaultPVHandler(self))
    self.provider.add(prefix + 'HPS:FIELDNAMES', self.fieldNames)
    self.provider.add(prefix + 'HPS:FIELDTYPES', self.fieldTypes)
    self.provider.add(prefix + 'HPS:FIELDMASK', self.fieldMask)
    self.provider.add(prefix + 'PAYLOAD', self.payload)
    self.update()

def addPV(label, init, reg, archive):
    pvu = SharedPV(initial=NTScalar('f').wrap(init * 7000. / 1300),
                   handler=DefaultPVHandler())
    provider.add(name + ':' + label + '_ns', pvu)
    pv = SharedPV(initial=NTScalar('I').wrap(init),
                  handler=CuDelayH(reg, archive, pvu))
    provider.add(name + ':' + label, pv)
    reg.set(init)
    return pv

class MyExample(object):
    @rpc(NTScalar("d"))
    def add(self, lhs, rhs):
        return float(lhs) + float(rhs)

    @rpc(NTScalar("s"))
    def echo(self, value, delay=1):
        print("Start echo", value, "wait", delay)
        time.sleep(float(delay))
        print("End echo", value, "wait", delay)
        return value

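# A minimal sketch of serving MyExample, assuming p4p's quickRPCServer
# helper; the provider name and the 'pv:call:' prefix are illustrative.
from p4p.rpc import quickRPCServer

quickRPCServer(provider="Example",
               prefix="pv:call:",
               target=MyExample())
# The decorated methods are then reachable as the RPC PVs
# 'pv:call:add' and 'pv:call:echo'.
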
def get_pv_type(self, data):
    if isinstance(data, np.ndarray):
        return NTNDArray()
    # bool must be tested before int: bool is a subclass of int in Python
    elif isinstance(data, bool):
        return NTScalar('?')
    elif isinstance(data, int):
        return NTScalar('l')
    elif isinstance(data, float):
        return NTScalar('d')
    else:
        return NTObject()

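# Hypothetical usage of get_pv_type(): choose the Normative Type from a
# sample value when first creating a PV. 'server' and 'data' are
# illustrative names, not from the original.
import numpy as np
from p4p.server.thread import SharedPV

data = np.zeros((4, 4))
pv = SharedPV(nt=server.get_pv_type(data), initial=data)  # -> NTNDArray
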
def addPV(label, reg, init=0, set=False):
    pvu = SharedPV(initial=NTScalar('f').wrap(init * 1400 / 1.3),
                   handler=DefaultPVHandler())
    provider.add(name + ':' + label + '_ns', pvu)
    pv = SharedPV(initial=NTScalar('I').wrap(init),
                  handler=L0DelayH(reg, self._app.partition, group, pvu))
    provider.add(name + ':' + label, pv)
    if set:
        self._app.partition.set(group)
        reg.set(pipelinedepth_from_delay(init))
    return pv

def test_update_handler_does_not_include_alarm_details_if_unchanged_in_subsequent_updates():
    producer = FakeProducer()
    context = FakeContext()

    pv_timestamp_s = 1.1  # seconds from unix epoch
    pv_source_name = "source_name"
    pv_value = -3
    pv_type = "i"
    alarm_status = 4  # Indicates RECORD alarm, we map the alarm message to a specific alarm status to forward
    alarm_severity = 1  # AlarmSeverity.MINOR
    alarm_message = "HIGH_ALARM"

    pva_update_handler = PVAUpdateHandler(producer, context, pv_source_name,
                                          "output_topic", "f142")  # type: ignore
    context.call_monitor_callback_with_fake_pv_update(
        NTScalar(pv_type, valueAlarm=True).wrap(
            {
                "value": pv_value,
                "alarm": {
                    "status": alarm_status,
                    "severity": alarm_severity,
                    "message": alarm_message,
                },
            },
            timestamp=pv_timestamp_s,
        ))
    # Second update, with unchanged alarm
    context.call_monitor_callback_with_fake_pv_update(
        NTScalar(pv_type, valueAlarm=True).wrap(
            {
                "value": pv_value,
                "alarm": {
                    "status": alarm_status,
                    "severity": alarm_severity,
                    "message": alarm_message,
                },
            },
            timestamp=pv_timestamp_s,
        ))

    assert producer.messages_published == 2
    pv_update_output = deserialise_f142(producer.published_payload)
    assert pv_update_output.alarm_status == AlarmStatus.NO_CHANGE
    assert pv_update_output.alarm_severity == AlarmSeverity.NO_CHANGE

    pva_update_handler.stop()

def test_update_handler_publishes_periodic_update():
    producer = FakeProducer()
    context = FakeContext()

    pv_timestamp_s = 1.1  # seconds from unix epoch
    pv_source_name = "source_name"
    pv_value = -3
    pv_type = "i"
    update_period_ms = 10

    pva_update_handler = PVAUpdateHandler(producer, context, pv_source_name,
                                          "output_topic", "f142",
                                          update_period_ms)  # type: ignore
    context.call_monitor_callback_with_fake_pv_update(
        NTScalar(pv_type, valueAlarm=True).wrap(pv_value,
                                                timestamp=pv_timestamp_s))

    assert producer.published_payload is not None
    pv_update_output = deserialise_f142(producer.published_payload)
    assert pv_update_output.value == pv_value
    assert pv_update_output.source_name == pv_source_name

    sleep(0.05)
    assert (
        producer.messages_published > 1
    ), "Expected more than the 1 message from triggered update due to periodic updates being active"

    pva_update_handler.stop()

def addPV(label, cmd):
    pv = SharedPV(initial=NTScalar('I').wrap(0),
                  handler=PVHandler(cmd))
    name = self._name + ':' + label
    print('Registering {:}'.format(name))
    self.provider.add(name, pv)
    self._pv.append(pv)

def __init__(self):
    self.provider = StaticProvider('ttfex_provider')
    self.pvs = {}
    pv = SharedPV(nt=NTScalar('d'), initial=0.0)
    self.provider.add('DRP:ATM:CAM:01:Pva:FLTPOS', pv)
    self.pvs['FLTPOS'] = pv

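# Usage sketch, assuming the standard p4p server API; 'TimeToolFex' is a
# hypothetical name for the class above, and the posted value is illustrative.
from p4p.server import Server

fex = TimeToolFex()
fex.pvs['FLTPOS'].post(3.14)  # monitors on the PV see the new value
Server.forever(providers=[fex.provider])
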
def addPV(label, init, reg, archive):
    pv = SharedPV(initial=NTScalar('I').wrap(init),
                  handler=RegArrayH(reg, archive=archive))
    provider.add(name + ':' + label, pv)
    for r in reg.values():
        r.set(init)
    return pv

def get_type(key, value):
    if key in byte_fields:
        return NTBytes()
    elif key in object_fields:
        return NTObject()
    else:
        return NTScalar(value)

def addPV(label, cmd, init=0, set=False):
    pv = SharedPV(initial=NTScalar('I').wrap(init),
                  handler=PVHandler(cmd))
    provider.add(name + ':' + label, pv)
    if set:
        cmd(pv, init)
    return pv

def __init__(self, provider, name, dev):
    self._dev = dev
    pv = SharedPV(initial=NTScalar('f').wrap(0),
                  handler=DefaultPVHandler())
    provider.add(name + ':CuPhase', pv)
    self._pv = pv

def update_info(self, data):
    # add the unaggregated version of the pvs
    for key, value in data.items():
        pvname = self.info_pvname(key)
        if pvname not in self.pvs:
            self.create_pv(pvname, NTScalar('as'), value)
        else:
            self.pvs[pvname].post(value)

def addPV(label, reg, init=0, set=False):
    pv = SharedPV(initial=NTScalar('I').wrap(init),
                  handler=IdxRegH(reg, self._app.partition, group))
    provider.add(name + ':' + label, pv)
    if set:
        self._app.partition.set(group)
        reg.set(init)
    return pv

def update(self, cycle):
    # The following section will throw an exception if the CuInput PV is not set properly
    if cycle < 10:
        print('pvseq in %d' % (10 - cycle))
    elif cycle == 10:
        self._seq = PVSeq(provider, self._name + ':SEQENG:0', self._ip,
                          Engine(0, self._xpm.SeqEng_0))
        self._pv_dumpSeq = SharedPV(initial=NTScalar('I').wrap(0),
                                    handler=CmdH(self._seq._eng.dump))
        provider.add(self._name + ':DumpSeq', self._pv_dumpSeq)

    global countdn
    # check for config save
    if countdn > 0:
        countdn -= 1
        if countdn == 0 and self._db:
            # save config
            print('Updating {}'.format(self._db))
            db_url, db_name, db_instrument, db_alias = self._db.split(',', 4)
            mycdb = cdb.configdb(db_url, db_instrument, True, db_name,
                                 user=db_instrument + 'opr', password='******')
            mycdb.add_device_config('xpm')

            top = cdict()
            top.setInfo('xpm', self._name, None, 'serial1234', 'No comment')
            top.setAlg('config', [0, 0, 0])

            lock.acquire()
            top.set('XTPG.CuDelay', self._xpm.CuGenerator.cuDelay.get(), 'UINT32')
            top.set('XTPG.CuBeamCode', self._xpm.CuGenerator.cuBeamCode.get(), 'UINT8')
            top.set('XTPG.CuInput', self._xpm.AxiSy56040.OutputConfig[0].get(), 'UINT8')
            v = []
            for i in range(8):
                self._xpm.XpmApp.partition.set(i)
                v.append(self._xpm.XpmApp.l0Delay.get())
            top.set('PART.L0Delay', v, 'UINT32')
            lock.release()

            if db_alias not in mycdb.get_aliases():
                mycdb.add_alias(db_alias)

            try:
                mycdb.modify_device(db_alias, top)
            except:
                pass

def __init__(self, provider_name, prefix):
    self.provider = StaticProvider(provider_name)
    self.prefix = prefix

    # Monitoring
    self.image = SharedPV(initial=NTScalar('ai').wrap({'value': [0] * 128}),
                          handler=DefaultPVHandler())
    self.provider.add(prefix, self.image)

def __init__(self, p, m, name, ip, xpm, stats):
    global provider
    provider = p
    global lock
    lock = m

    # Assign transmit link ID
    ip_comp = ip.split('.')
    v = (0xff0000 | (int(ip_comp[2]) << 8) | int(ip_comp[3])) << 4
    xpm.XpmApp.paddr.set(v)
    print('Set PADDR to 0x{:x}'.format(v))

    self._ip = ip

    self._links = []
    for i in range(24):
        self._links.append(LinkCtrls(name, xpm, i))

    app = xpm.XpmApp

    self._pv_amcDumpPLL = []
    for i in range(2):
        pv = SharedPV(initial=NTScalar('I').wrap(0),
                      handler=IdxCmdH(app.amcPLL.Dump, app.amc, i))
        provider.add(name + ':DumpPll%d' % i, pv)
        self._pv_amcDumpPLL.append(pv)

    self._cu = CuGenCtrls(name + ':XTPG', xpm)

    self._group = GroupCtrls(name, app, stats)

    self._seq = PVSeq(provider, name + ':SEQENG:0', ip, Engine(0, xpm.SeqEng_0))
    self._pv_dumpSeq = SharedPV(initial=NTScalar('I').wrap(0),
                                handler=IdxCmdH(self._seq._eng.dump))
    provider.add(name + ':DumpSeq', self._pv_dumpSeq)

    self._thread = threading.Thread(target=self.notify)
    self._thread.start()

def __init__(self, provider, prefix, start):
    self.provider = provider
    self.prefix = prefix

    # Make configuration one PV access for each readout channel
    if start:
        daqConfig['enable'] = ('i', 1)
    self.daqConfig = MySharedPV(daqConfig, self.updateDaqConfig)
    self.ready = SharedPV(initial=NTScalar('I').wrap({'value': 0}),
                          handler=DefaultPVHandler())

    self.provider.add(prefix + ':CONFIG', self.daqConfig)
    self.provider.add(prefix + ':READY', self.ready)

    # Monitoring
    self.fwBuild = SharedPV(initial=NTScalar('s').wrap({'value': ''}),
                            handler=DefaultPVHandler())
    self.fwVersion = SharedPV(initial=NTScalar('I').wrap({'value': 0}),
                              handler=DefaultPVHandler())
    self.pAddr = SharedPV(initial=NTScalar('s').wrap({'value': ''}),
                          handler=DefaultPVHandler())
    self.pAddr_u = SharedPV(initial=NTScalar('I').wrap({'value': 0}),
                            handler=DefaultPVHandler())
    self.pLink = SharedPV(initial=NTScalar('I').wrap({'value': 0}),
                          handler=DefaultPVHandler())
    self.monTiming = MySharedPV(monTiming)
    self.monPgp = MySharedPV(monPgp)
    self.monRawBuf = MySharedPV(monBuf)
    self.monFexBuf = MySharedPV(monBuf)
    self.monRawDet = MySharedPV(monBufDetail)
    self.monFexDet = MySharedPV(monBufDetail)
    self.monFlow = MySharedPV(monFlow)
    self.monEnv = MySharedPV(monEnv)
    self.monAdc = MySharedPV(monAdc)
    self.monJesd = MySharedPV(monJesd)
    self.monJesdTtl = MySharedPV(monJesdTtl)

    self.provider.add(prefix + ':FWBUILD', self.fwBuild)
    self.provider.add(prefix + ':FWVERSION', self.fwVersion)
    self.provider.add(prefix + ':PADDR', self.pAddr)
    self.provider.add(prefix + ':PADDR_U', self.pAddr_u)
    self.provider.add(prefix + ':PLINK', self.pLink)
    self.provider.add(prefix + ':MONTIMING', self.monTiming)
    self.provider.add(prefix + ':MONPGP', self.monPgp)
    self.provider.add(prefix + ':MONRAWBUF', self.monRawBuf)
    self.provider.add(prefix + ':MONFEXBUF', self.monFexBuf)
    self.provider.add(prefix + ':MONRAWDET', self.monRawDet)
    self.provider.add(prefix + ':MONFEXDET', self.monFexDet)
    self.provider.add(prefix + ':MONFLOW', self.monFlow)
    self.provider.add(prefix + ':MONENV', self.monEnv)
    self.provider.add(prefix + ':MONADC', self.monAdc)
    self.provider.add(prefix + ':MONJESD', self.monJesd)
    self.provider.add(prefix + ':MONJESDTTL', self.monJesdTtl)

    # Expert functions
    self.daqReset = MySharedPV(daqReset, self.updateDaqReset)
    self.provider.add(prefix + ':RESET', self.daqReset)

class PvaExportRpcHandler:
    def __init__(self, ctx, addr):
        self.ctx = ctx
        self.addr = addr
        self.comms = {}

    def _get_comm(self, graph):
        if graph not in self.comms:
            self.comms[graph] = ami.comm.GraphCommHandler(graph, self.addr,
                                                          ctx=self.ctx)
        return self.comms[graph]

    @rpc(NTScalar('?'))
    def create(self, graph):
        return self._get_comm(graph).create()

    @rpc(NTScalar('?'))
    def destroy(self, graph):
        return self._get_comm(graph).destroy()

    @rpc(NTScalar('?'))
    def clear(self, graph):
        return self._get_comm(graph).clear()

    @rpc(NTScalar('?'))
    def reset(self, graph):
        return self._get_comm(graph).reset()

    @rpc(NTScalar('?'))
    def post(self, graph, topic, payload):
        return self._get_comm(graph)._post_dill(topic, dill.loads(payload.tobytes()))

    @rpc(NTScalar('as'))
    def names(self, graph):
        return self._get_comm(graph).names

    @rpc(NTScalar('?'))
    def view(self, graph, name):
        return self._get_comm(graph).view(name)

    @rpc(NTScalar('?'))
    def export(self, graph, name, alias):
        return self._get_comm(graph).export(name, alias)

def createPV(self, pvname, node):
    """ Takes a string prefix and a dict of pv definitions similar to pcaspy
    and creates PVAccess pv's for them.
    Example PV definitions:
    {'type' : 'int', 'count' : 2, 'value' : [0,0x0fffffff] }
    {'type' : 'float', 'value' : 156.25 }
    """
    # Assumption: the PV definition arrives via the 'node' argument.
    # (The original assigned an empty dict here, which would make every
    # pvdef lookup below fail.)
    pvdef = node
    logger.debug("Creating PV for %s", pvname)
    # Keys outside the supported set are unsupported specifiers.
    unsupported_specs = pvdef.keys() - set(['type', 'count', 'value', 'extra'])
    if unsupported_specs:
        raise Exception(
            "Do not have support for specifier {0} as of yet".format(
                ",".join(unsupported_specs)))
    try:
        tp = __pcastypes2p4ptype__[pvdef['type']]
        starting_val = pvdef.get('value',
                                 __pcastypes2startingval__[pvdef['type']])
        if pvdef.get('count', 1) > 1:
            tp = 'a' + tp
            starting_val = pvdef.get(
                'value',
                [__pcastypes2startingval__[pvdef['type']]] * pvdef['count'])
        init_val = {"value": starting_val}
        extra_defs = []
        if 'extra' in pvdef:
            init_val.update({fn: fv for (fn, _, fv) in pvdef['extra']})
            extra_defs = [(fn, __pcastypes2p4ptype__[ft])
                          for (fn, ft, _) in pvdef['extra']]
        logger.debug("NTScalar(%s, extra=%s).wrap(%s)", tp, extra_defs, init_val)
        pv = SharedPV(initial=NTScalar(tp, extra=extra_defs).wrap(init_val),
                      handler=DefaultPVHandler())
    except:
        pv = SharedPV(initial=Value(Type(pvdef['type']), pvdef['value']),
                      handler=DefaultPVHandler())
    # we must keep a reference in order to keep the Handler from being collected
    self.pvs.append(pv)
    logger.debug("Created PV for %s", pvname)
    return pv

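# Example calls, following the docstring's pcaspy-style definitions
# ('server' and the PV names are illustrative):
server.createPV('MY:WAVEFORM', {'type': 'int', 'count': 2,
                                'value': [0, 0x0fffffff]})
server.createPV('MY:SETPOINT', {'type': 'float', 'value': 156.25})
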
def test_update_handler_publishes_int_update(pv_value, pv_type):
    producer = FakeProducer()
    context = FakeContext()

    pv_timestamp_s = 1.1  # seconds from unix epoch
    pv_source_name = "source_name"

    pva_update_handler = PVAUpdateHandler(producer, context, pv_source_name,
                                          "output_topic", "f142")  # type: ignore
    context.call_monitor_callback_with_fake_pv_update(
        NTScalar(pv_type, valueAlarm=True).wrap(pv_value,
                                                timestamp=pv_timestamp_s))

    assert producer.published_payload is not None
    pv_update_output = deserialise_f142(producer.published_payload)
    assert pv_update_output.value == pv_value
    assert pv_update_output.source_name == pv_source_name

    pva_update_handler.stop()

def test_update_handler_publishes_alarm_update():
    producer = FakeProducer()
    context = FakeContext()

    pv_value = 42
    pv_type = "i"
    pv_timestamp_s = 1.1  # seconds from unix epoch
    pv_source_name = "source_name"
    alarm_status = 4  # Indicates RECORD alarm, we map the alarm message to a specific alarm status to forward
    alarm_severity = 1  # AlarmSeverity.MINOR
    alarm_message = "HIGH_ALARM"

    pva_update_handler = PVAUpdateHandler(producer, context, pv_source_name,
                                          "output_topic", "f142")  # type: ignore
    context.call_monitor_callback_with_fake_pv_update(
        NTScalar(pv_type, valueAlarm=True).wrap(
            {
                "value": pv_value,
                "alarm": {
                    "status": alarm_status,
                    "severity": alarm_severity,
                    "message": alarm_message,
                },
            },
            timestamp=pv_timestamp_s,
        ))

    assert producer.published_payload is not None
    pv_update_output = deserialise_f142(producer.published_payload)
    assert pv_update_output.value == pv_value
    assert pv_update_output.source_name == pv_source_name
    assert pv_update_output.alarm_status == AlarmStatus.HIGH
    assert pv_update_output.alarm_severity == AlarmSeverity.MINOR

    pva_update_handler.stop()

def __init__(self, p, m, name=None, ip=None, xpm=None, stats=None, db=None):
    global provider
    provider = p
    global lock
    lock = m

    # Assign transmit link ID
    ip_comp = ip.split('.')
    xpm_num = name.rsplit(':', 1)[1]
    v = (0xff00000 |
         ((int(xpm_num) & 0xf) << 16) |
         ((int(ip_comp[2]) & 0xf) << 12) |
         ((int(ip_comp[3]) & 0xff) << 4))
    xpm.XpmApp.paddr.set(v)
    print('Set PADDR to 0x{:x}'.format(v))

    self._name = name
    self._ip = ip
    self._xpm = xpm
    self._db = db

    init = None
    try:
        db_url, db_name, db_instrument, db_alias = db.split(',', 4)
        print('db {:}'.format(db))
        print('url {:} name {:} instr {:} alias {:}'.format(
            db_url, db_name, db_instrument, db_alias))
        print('device {:}'.format(name))
        init = get_config_with_params(db_url, db_instrument, db_name,
                                      db_alias, name)
        print('cfg {:}'.format(init))
    except:
        print('Caught exception reading configdb [{:}]'.format(db))

    self._links = []
    for i in range(24):
        self._links.append(LinkCtrls(name, xpm, i))

    app = xpm.XpmApp

    self._pv_amcDumpPLL = []
    for i in range(2):
        pv = SharedPV(initial=NTScalar('I').wrap(0),
                      handler=IdxCmdH(app.amcPLL.Dump, app.amc, i))
        provider.add(name + ':DumpPll%d' % i, pv)
        self._pv_amcDumpPLL.append(pv)

    self._cu = CuGenCtrls(name + ':XTPG', xpm,
                          init=init['XTPG'] if init is not None else None)

    self._group = GroupCtrls(name, app, stats, init=init)

    ## Remove sequencer while we test Ben's image
    if True:
        self._seq = PVSeq(provider, name + ':SEQENG:0', ip,
                          Engine(0, xpm.SeqEng_0))
        self._pv_dumpSeq = SharedPV(initial=NTScalar('I').wrap(0),
                                    handler=CmdH(self._seq._eng.dump))
        provider.add(name + ':DumpSeq', self._pv_dumpSeq)

    self._pv_usRxReset = SharedPV(initial=NTScalar('I').wrap(0),
                                  handler=CmdH(xpm.UsTiming.C_RxReset))
    provider.add(name + ':Us:RxReset', self._pv_usRxReset)

    self._pv_cuRxReset = SharedPV(initial=NTScalar('I').wrap(0),
                                  handler=CmdH(xpm.CuTiming.C_RxReset))
    provider.add(name + ':Cu:RxReset', self._pv_cuRxReset)

    self._thread = threading.Thread(target=self.notify)
    self._thread.start()

def addPV(label, reg):
    pv = SharedPV(initial=NTScalar('I').wrap(0),
                  handler=RegH(reg))
    provider.add(name + ':' + label, pv)
    return pv

def addPV(label):
    pv = SharedPV(initial=NTScalar('I').wrap(0),
                  handler=DefaultPVHandler())
    provider.add(name + ':' + label, pv)
    return pv

def addPV(label, cmd, init):
    pv = SharedPV(initial=NTScalar('I').wrap(init),
                  handler=PVHandler(cmd))
    provider.add(name + ':' + label + '%d' % idx, pv)
    cmd(pv, init)  # initialize
    return pv

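# The addPV helpers above are nested closures: 'provider', 'name', and 'idx'
# are free variables captured from an enclosing __init__. A minimal sketch of
# that pattern (the class name, PV label, and no-op command are illustrative):
class ExpertCtrls(object):
    def __init__(self, provider, name, idx):
        def addPV(label, cmd, init):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=PVHandler(cmd))
            provider.add(name + ':' + label + '%d' % idx, pv)
            cmd(pv, init)  # initialize
            return pv

        self._pv_reset = addPV('Reset', lambda pv, value: None, 0)
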