def hsd_unconfig(prefix):
    """Disable the HSD via its :CONFIG PV and wait for the :READY handshake.

    Args:
        prefix: EPICS PV name prefix of the HSD.  Also stored into the
            module-global ``epics_prefix`` as a side effect (pre-existing
            behavior, preserved).

    Returns:
        None.

    Raises:
        Exception: if the :READY handshake does not complete within ~10 s
            (100 polls at 0.1 s).
    """
    global epics_prefix
    epics_prefix = prefix
    ctxt = Context('pva')
    # BUG FIX: the original leaked the Context when the handshake timed out,
    # because the raise skipped ctxt.close().  Close it in a finally block.
    try:
        values = ctxt.get(epics_prefix + ':CONFIG')
        values['enable'] = 0
        print(values)
        print(epics_prefix)
        ctxt.put(epics_prefix + ':CONFIG', values, wait=True)
        # This handshake seems to be necessary, or at least the .get()
        complete = False
        for i in range(100):
            complete = ctxt.get(epics_prefix + ':READY') != 0
            if complete:
                break
            print('hsd_unconfig wait for complete', i)
            time.sleep(0.1)
        if complete:
            print('hsd unconfig complete')
        else:
            raise Exception('timed out waiting for hsd_unconfig')
    finally:
        ctxt.close()
    return None
def wave8_unconfig(epics_prefix):
    """Wave8 unconfigure — currently a deliberate no-op.

    The original body (a PVA put clearing
    ``:TriggerEventManager:TriggerEventBuffer[0]:MasterEnable``) sat behind an
    unconditional early ``return None`` and was unreachable dead code; it has
    been removed.

    Args:
        epics_prefix: EPICS PV name prefix of the Wave8 (unused while the
            function is a no-op).

    Returns:
        None.
    """
    # NOTE(review): to re-enable the hardware disable, restore:
    #   ctxt = Context('pva')
    #   ctxt.put(epics_prefix + ':TriggerEventManager:TriggerEventBuffer[0]:MasterEnable', 0, wait=True)
    #   ctxt.close()
    return None
def hps_connect(root):
    """Fetch HPS connection info (address and port) from EPICS.

    Args:
        root: EPICS PV name prefix for the HPS ADDR/PORT PVs.

    Returns:
        dict with keys 'addr' and 'port' holding the PV values.
    """
    ctxt = Context('pva')
    d = {}
    # BUG FIX: the original read an undefined/global name `bldName` while the
    # `root` parameter went unused; use the parameter.
    d['addr'] = ctxt.get(root + ':ADDR')
    d['port'] = ctxt.get(root + ':PORT')
    print('hps_connect {:}'.format(d))
    ctxt.close()
    return d
def hsd_connect(epics_prefix):
    """Read the HSD PGP link address (:PADDR_U) from EPICS.

    Args:
        epics_prefix: EPICS PV name prefix of the HSD.

    Returns:
        JSON string of the form ``{"paddr": <value>}``.
    """
    # One-shot read of the link address; no polling in this variant.
    ctxt = Context('pva')
    paddr = ctxt.get(epics_prefix + ':PADDR_U')
    print(paddr)
    ctxt.close()
    return json.dumps({'paddr': paddr})
def wave8_scan_keys(update):
    """Apply a scan-step partial reconfiguration to the Wave8.

    Args:
        update: JSON string holding the keys changed by the scan.

    Returns:
        JSON string of the partial configuration actually applied, with the
        identity fields required for XTC translation retained.
    """
    global prefix
    global ocfg
    # Extract only the scan-updated entries from the cached configuration.
    cfg = {}
    copy_reconfig_keys(cfg, ocfg, json.loads(update))
    # Push the partial configuration to the hardware.
    pva = Context('pva')
    user_to_expert(pva, prefix + ':Top:', cfg, full=False)
    pva.close()
    # The XTC translator requires these read-only identity fields.
    mandatory_keys = ('detType:RO', 'detName:RO', 'detId:RO', 'doc:RO', 'alg:RO')
    for key in mandatory_keys:
        copy_config_entry(cfg, ocfg, key)
        copy_config_entry(cfg[':types:'], ocfg[':types:'], key)
    return json.dumps(cfg)
def wave8_config(prefix, connect_str, cfgtype, detname, detsegm, grp):
    """Full Wave8 configuration: fetch config, program PVs, pulse resets.

    Sequence (order matters for the hardware):
      1. fetch config from the config DB and push user->expert translation,
      2. assert the clear/reset PVs,
      3. program the expert configuration,
      4. enable the trigger event buffer, wait 0.2 s,
      5. deassert the clears and Blowoff,
      6. capture firmware version/build into the returned config.

    Returns the applied configuration as a JSON string.
    """
    global ctxt
    # NOTE(review): `lane` is declared global but never assigned here — it
    # must be set elsewhere in the module before this is called; confirm.
    global lane
    global group
    group = grp
    cfg = get_config(connect_str, cfgtype, detname, detsegm)
    # NOTE(review): this `ocfg` is a local and does not update any
    # module-global `ocfg` (no `global ocfg` declaration) — confirm intent.
    ocfg = cfg
    ctxt = Context('pva')
    epics_prefix = prefix + ':Top:'
    user_to_expert(ctxt, epics_prefix, cfg, full=True)
    # Assert clears
    names_clr = [
        epics_prefix + 'BatcherEventBuilder:Blowoff',
        epics_prefix + 'TimingFrameRx:RxCountReset',
        epics_prefix + 'RawBuffers:CntRst',
        epics_prefix + 'Integrators:CntRst'
    ]
    values = [1] * len(names_clr)
    print('names {:}'.format(names_clr))
    ctxt.put(names_clr, values)
    config_expert(ctxt, epics_prefix, cfg['expert'])
    # Enable the trigger event buffer for this lane, then let counters settle.
    ctxt.put(epics_prefix + 'TriggerEventManager:TriggerEventBuffer[%d]:MasterEnable' % lane, 1, wait=True)
    time.sleep(0.2)
    # Deassert clears
    values = [0] * len(names_clr)
    print('names {:}'.format(names_clr))
    ctxt.put(names_clr, values)
    ctxt.put(epics_prefix + 'BatcherEventBuilder:Blowoff', 0, wait=True)
    # Record firmware identity for persistence in the xtc stream.
    cfg['firmwareVersion'] = ctxt.get(epics_prefix + 'AxiVersion:FpgaVersion').raw.value
    cfg['firmwareBuild'] = ctxt.get(epics_prefix + 'AxiVersion:BuildStamp').raw.value
    ctxt.close()
    v = json.dumps(cfg)
    return v
def hsd_update(update):
    """Apply a scan-step partial reconfiguration to the HSD.

    Args:
        update: JSON string holding the keys changed by the scan.

    Returns:
        JSON string of the partial configuration applied, with the identity
        fields required for XTC translation retained.
    """
    global ocfg
    # Merge the update payload against the cached full configuration.
    delta = json.loads(update)
    cfg = {}
    update_config_entry(cfg, ocfg, delta)
    # Translate user-level fields, then push the result to the hardware.
    user_to_expert(cfg, full=False)
    pva = Context('pva')
    apply_config(pva, cfg)
    pva.close()
    # The XTC translator requires these read-only identity fields.
    for key in ('detType:RO', 'detName:RO', 'detId:RO', 'doc:RO', 'alg:RO'):
        copy_config_entry(cfg, ocfg, key)
        copy_config_entry(cfg[':types:'], ocfg[':types:'], key)
    return json.dumps(cfg)
def wave8_connect(epics_prefix):
    """Poll EPICS for the Wave8 timing-link RxId and return it as JSON.

    Other processes (e.g. the PVA server) may still be starting, so retry
    for up to ~5 s (50 polls at 0.1 s) until the RxId is nonzero.

    Args:
        epics_prefix: EPICS PV name prefix of the Wave8.

    Returns:
        JSON string of the form ``{"paddr": <rx_id>}``; may hold the last
        (zero) reading if the poll never saw a nonzero value.
    """
    pvname = epics_prefix + ':Top:TriggerEventManager:XpmMessageAligner:RxId'
    ctxt = Context('pva')
    rx_id = 0
    for _ in range(50):
        rx_id = ctxt.get(pvname).raw.value
        if rx_id != 0:
            break
        print('{:} is zero, retry'.format(pvname))
        time.sleep(0.1)
    ctxt.close()
    return json.dumps({'paddr': rx_id})
def hsd_connect(epics_prefix):
    """Poll EPICS for the HSD PGP link address and return it as JSON.

    Other processes {PVA Server, hsdioc} may still be starting, so retry
    for up to ~5 s (50 polls at 0.1 s) until :PADDR_U is nonzero.

    Args:
        epics_prefix: EPICS PV name prefix of the HSD.

    Returns:
        JSON string of the form ``{"paddr": <value>}``; may hold the last
        (zero) reading if the poll never saw a nonzero value.
    """
    pvname = epics_prefix + ':PADDR_U'
    ctxt = Context('pva')
    paddr = 0
    for _ in range(50):
        paddr = ctxt.get(pvname)
        if paddr != 0:
            break
        print('{:} is zero, retry'.format(pvname))
        time.sleep(0.1)
    ctxt.close()
    return json.dumps({'paddr': paddr})
def test_constant_variable_pva(value, prefix, server, model):
    """Check constant-variable behavior over pvAccess.

    Puts `value` to every scalar input PV, then reads each back:
    a constant variable must NOT have taken the put (val != value),
    a non-constant one must (val == value).  Both the put and get phases
    retry up to 3 times, rebuilding the Context on failure.

    Args:
        value: scalar value to write to each input PV.
        prefix: PV name prefix; PVs are f"{prefix}:{variable.name}".
        server: running PVA server fixture (unused directly; keeps it alive).
        model: object exposing `input_variables` (name -> variable with
            `name`, `variable_type`, `is_constant`).

    Raises:
        Exception: if all retries of a put or a get fail.
    """
    ctxt = Context("pva", conf=PVA_CONFIG, maxsize=2)
    # check constant variable assignment
    for _, variable in model.input_variables.items():
        pvname = f"{prefix}:{variable.name}"
        if variable.variable_type == "scalar":
            count = 3
            successful_put = False
            while count > 0 and not successful_put:
                try:
                    ctxt.put(pvname, value)
                    successful_put = True
                # BUG FIX: was a bare `except:`, which also swallows
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                except Exception:
                    ctxt.close()
                    del ctxt
                    time.sleep(3)
                    ctxt = Context("pva", conf=PVA_CONFIG)
                    count -= 1
            if count == 0:
                raise Exception("Failed puts.")
    for _, variable in model.input_variables.items():
        if variable.variable_type == "scalar":
            pvname = f"{prefix}:{variable.name}"
            count = 3
            successful_get = False
            val = None
            while count > 0 and not successful_get:
                try:
                    val = ctxt.get(pvname)
                    successful_get = True
                # BUG FIX: narrowed from bare `except:` (see above).
                except Exception:
                    ctxt.close()
                    del ctxt
                    time.sleep(5)
                    ctxt = Context("pva", conf=PVA_CONFIG)
                    time.sleep(1)
                    count -= 1
            if count == 0:
                raise Exception("Failed gets.")
            if variable.is_constant:
                # A constant variable must ignore the put.
                assert val != value
            else:
                assert val == value
    ctxt.close()
class P4PProvider(QObject):
    """Qt bridge for a p4p monitor of a dynamic record.

    The p4p monitor callback runs on a p4p thread; it stashes the update in
    ``self.struct``, emits ``callbacksignal`` so ``mycallback`` runs on the Qt
    thread, and blocks on ``callbackDoneEvent`` until the Qt side has consumed
    the update.  ``mycallback`` forwards status/data dicts to
    ``self.viewer.callback``.
    """
    callbacksignal = pyqtSignal()

    def __init__(self):
        QObject.__init__(self)
        self.callbacksignal.connect(self.mycallback)
        # Handshake: p4p thread waits on this until the Qt thread is done.
        self.callbackDoneEvent = Event()
        self.firstCallback = True
        self.isClosed = True
        # When True, only measure and print the callback rate; drop data.
        self.monitorRateOnly = False
        self.ncallbacks = 0
        self.lastTime = time.time()

    def start(self):
        """Open the pva Context and subscribe to the dynamic record."""
        self.ctxt = Context('pva')
        self.firstCallback = True
        self.isClosed = False
        self.subscription = self.ctxt.monitor(
            getDynamicRecordName(),
            self.p4pcallback,
            request='field()',
            notify_disconnect=True)

    def stop(self):
        """Mark closed and tear down the Context (ends the subscription)."""
        self.isClosed = True
        self.ctxt.close()

    def done(self):
        pass

    def callback(self, arg):
        # Forward to the viewer; NOTE(review): self.viewer is set externally.
        self.viewer.callback(arg)

    def p4pcallback(self, arg):
        """Monitor callback (p4p thread): hand `arg` to the Qt thread."""
        if self.monitorRateOnly:
            # Rate-measurement mode: count callbacks, print rate once per second.
            self.ncallbacks += 1
            timenow = time.time()
            timediff = timenow - self.lastTime
            if timediff < 1:
                return
            print('rate=', round(self.ncallbacks / timediff))
            self.lastTime = timenow
            self.ncallbacks = 0
            return
        if self.isClosed:
            return
        self.struct = arg;
        self.callbacksignal.emit()
        # Block this p4p thread until mycallback() finishes on the Qt thread.
        self.callbackDoneEvent.wait()
        self.callbackDoneEvent.clear()

    def mycallback(self):
        """Qt-thread handler: translate the update into viewer callback dicts."""
        struct = self.struct
        arg = dict()
        try:
            argtype = str(type(struct))
            # p4p delivers a Disconnected sentinel on connection loss.
            if argtype.find('Disconnected') >= 0:
                arg["status"] = "disconnected"
                self.callback(arg)
                self.firstCallback = True
                self.callbackDoneEvent.set()
                return
            if self.firstCallback:
                arg = dict()
                arg["status"] = "connected"
                self.callback(arg)
                self.firstCallback = False
                # NOTE(review): "connected" is delivered twice here —
                # possibly intentional, possibly redundant; confirm.
                self.callback(arg)
            # Unpack the record fields into a DynamicRecordData value.
            data = DynamicRecordData()
            data.name = struct['name']
            data.x = struct['x']
            data.y = struct['y']
            data.xmin = struct['xmin']
            data.xmax = struct['xmax']
            data.ymin = struct['ymin']
            data.ymax = struct['ymax']
            arg = dict()
            arg['value'] = data
            self.callback(arg)
            self.callbackDoneEvent.set()
            return
        except Exception as error:
            # Report the failure to the viewer and release the p4p thread.
            arg["exception"] = repr(error)
            self.callback(arg)
            self.callbackDoneEvent.set()
            return
class P4PProvider(QObject, NTNDA_Channel_Provider):
    """Qt bridge for a p4p monitor of an NTNDArray channel.

    Same handshake pattern as the dynamic-record provider: the p4p monitor
    callback stashes the update, emits ``callbacksignal`` so ``mycallback``
    runs on the Qt thread, and blocks on ``callbackDoneEvent`` until the Qt
    side has consumed it.  Forwards dicts to ``self.NTNDA_Viewer.callback``.
    """
    callbacksignal = pyqtSignal()

    def __init__(self):
        QObject.__init__(self)
        NTNDA_Channel_Provider.__init__(self)
        self.callbacksignal.connect(self.mycallback)
        # Handshake: p4p thread waits on this until the Qt thread is done.
        self.callbackDoneEvent = Event()
        self.firstCallback = True
        self.isClosed = True

    def start(self):
        """Open the pva Context and subscribe to the NTNDArray channel."""
        self.ctxt = Context('pva')
        self.firstCallback = True
        self.isClosed = False
        self.subscription = self.ctxt.monitor(
            self.getChannelName(),
            self.p4pcallback,
            request='field(value,dimension,codec,compressedSize,uncompressedSize)',
            notify_disconnect=True)

    def stop(self):
        """Mark closed and tear down the Context (ends the subscription)."""
        self.isClosed = True
        self.ctxt.close()

    def done(self):
        pass

    def callback(self, arg):
        self.NTNDA_Viewer.callback(arg)

    def p4pcallback(self, arg):
        """Monitor callback (p4p thread): hand `arg` to the Qt thread."""
        if self.isClosed:
            return
        self.struct = arg;
        self.callbacksignal.emit()
        # Block this p4p thread until mycallback() finishes on the Qt thread.
        self.callbackDoneEvent.wait()
        self.callbackDoneEvent.clear()

    def mycallback(self):
        """Qt-thread handler: translate the update into viewer callback dicts."""
        struct = self.struct
        arg = dict()
        try:
            argtype = str(type(struct))
            # p4p delivers a Disconnected sentinel on connection loss.
            if argtype.find('Disconnected') >= 0:
                arg["status"] = "disconnected"
                self.callback(arg)
                self.firstCallback = True
                self.callbackDoneEvent.set()
                return
            if self.firstCallback:
                arg = dict()
                arg["status"] = "connected"
                self.callback(arg)
                self.firstCallback = False
                # NOTE(review): "connected" is delivered twice here —
                # possibly intentional, possibly redundant; confirm.
                self.callback(arg)
            # Forward the image payload fields the viewer consumes.
            arg = dict()
            arg['value'] = struct['value']
            arg['dimension'] = struct['dimension']
            arg['codec'] = struct['codec']
            arg['compressedSize'] = struct['compressedSize']
            arg['uncompressedSize'] = struct['uncompressedSize']
            self.callback(arg)
            self.callbackDoneEvent.set()
            return
        except Exception as error:
            # Report the failure to the viewer and release the p4p thread.
            arg["exception"] = repr(error)
            self.callback(arg)
            self.callbackDoneEvent.set()
            return
class pvGetClient(object):
    """p4p client that repeatedly issues asynchronous gets (or a monitor)
    against one PV and records a timestamped value history.

    A background thread (``pvGetTimeoutLoop``) waits for an in-flight async
    get, consumes its result from ``self._Q``, tears down and rebuilds the
    Context, sleeps ``repeat`` seconds, and issues the next get.  Results are
    accumulated in ``self._history`` and can be dumped with ``writeValues``.
    """

    def __init__(self, pvName, monitor='False', provider='pva', timeout=5.0, repeat=1.0, showValue=False, throw=False, verbose=False, checkPriorCount=False):
        self._lock = Lock()
        self._pvName = pvName
        self._history = {}           # pvName -> list of [raw_stamp, value]
        self._priorValue = None      # last int value seen (missed-count check)
        self._Q = Queue()            # results handed from callback to loop
        self._S = None               # active monitor Subscription, if any
        self._T = None               # background pvGetTimeoutLoop thread
        self._Op = None              # in-flight async-get ClientOperation
        self._noConnectionYet = True
        self._shutDown = False
        self._provider = provider
        self._throw = throw
        #self._monitor = monitor
        self._repeat = repeat
        self._showValue = showValue
        self._timeout = timeout
        self._verbose = verbose
        self._checkPriorCount = checkPriorCount
        self._ctxt = Context(provider)
        #self._pvGetDone = threading.Event()
        # Set when an async get has been initiated; woken by the loop thread.
        self._pvGetPending = threading.Event()
        self._T = threading.Thread(target=self.pvGetTimeoutLoop, args=(self._timeout, self._throw, self._verbose))
        self._T.start()

    def __del__(self):
        # Best-effort Context cleanup on garbage collection.
        if self._ctxt is not None:
            self._ctxt.close()
            self._ctxt = None

    def is_alive(self):
        """Return True while the background get loop thread is running."""
        if not self._T:
            return False
        return self._T.is_alive()

    def pvMonitor(self):
        """Subscribe to the PV; updates arrive via self.callback."""
        # Monitor is easy as p4p provides it.
        if self._ctxt is None:
            self._ctxt = Context(self._provider)
        self._S = self._ctxt.monitor(self._pvName, self.callback, notify_disconnect=True)
        # Above code does this:
        # R = Subscription( self._ctxt, self._pvName, self.callback, notify_disconnect=True )
        # R._S = super(Context, self).monitor( name, R._event, request )
        # self._S = R

    def pvGetInitiate(self, timeout=5.0, throw=False, verbose=True):
        """Kick off one async non-blocking get via the raw ClientOperation."""
        #pdb.set_trace()
        # This code block does a synchronous get()
        #return self._ctxt.get( self._pvName, self.callback )
        # Initiate async non-blocking pvGet using a ClientOperation.
        # self.pvGetCallback() handles the response and places it on self._Q
        if self._ctxt is None:
            self._ctxt = Context(self._provider)
        # Bypass the threaded Context.get wrapper to get the raw operation.
        raw_get = super(Context, self._ctxt).get
        try:
            assert self._Op is None
            self._Op = raw_get(self._pvName, self.pvGetCallback)
            self._pvGetPending.set()
        except:
            raise
        return

    def pvGetCallback(self, cbData):
        """Async-get completion callback: process and queue the result."""
        result = self.callback(cbData)
        #self._ctxt.disconnect()
        #self._noConnectionYet = True
        if result is not None:
            assert self._Q
            try:
                self._Q.put_nowait(result)
                if self._verbose:
                    print("%s: Added result to queue. %d on queue." % (self._pvName, self._Q.qsize()))
            except:
                print("pvGetCallback %s: Error queuing result" % self._pvName)
        return

    def handleResult(self):
        """Block up to self._timeout for one queued result; report timeouts."""
        # Get pvGet result from self._Q
        result = False
        try:
            result = self._Q.get(timeout=self._timeout)
        except Empty:
            # No result arrived in time; log a timestamped timeout message.
            curTime = time.time()
            raw_stamp = (int(curTime), int((curTime - int(curTime)) * 1e9))
            strTimeStamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(raw_stamp[0]))
            print('%s %s.%03d Timeout' % (self._pvName, strTimeStamp, float(raw_stamp[1]) / 1e6))
            if self._throw:
                raise TimeoutError()
        if isinstance(result, Exception):
            if self._throw:
                raise result
            return None
        return result

    def pvGetTimeoutLoop(self, timeout=5.0, throw=False, verbose=True):
        """Background thread: consume results, recycle the Context, re-get."""
        status = False
        while not self._shutDown:
            # Wait for something to do.
            status = self._pvGetPending.wait(timeout=timeout)
            self._pvGetPending.clear()
            status = self.handleResult()
            # Close out the completed (or timed-out) operation.
            if self._Op:
                self._Op.close()
                self._Op = None
            #self._ctxt.disconnect()
            #self._noConnectionYet = True
            # Tear down the Context; pvGetInitiate() will rebuild it.
            self._ctxt.close()
            self._ctxt = None
            self._noConnectionYet = True
            # repeat=None means single-shot: stop after the first result.
            if self._repeat is None:
                self._shutDown = True
            if self._shutDown:
                break
            time.sleep(self._repeat)
            self.pvGetInitiate()
        # Return on shutdown
        return

    def pvName(self):
        return self._pvName

    def callback(self, cbData):
        """Common handler for monitor/get data: extract timestamped values.

        Returns cbData (possibly replaced by a save result) on success, or
        None for disconnect/error sentinels.
        """
        pvName = self._pvName
        # Connection-state sentinels from p4p.
        if isinstance(cbData, (RemoteError, Disconnected, Cancelled)):
            if self._noConnectionYet and isinstance(cbData, Disconnected):
                return None
            if not isinstance(cbData, Cancelled):
                print('%s: %s' % (pvName, cbData))
            return None
        self._noConnectionYet = False
        pvValue = cbData
        # Make sure we have a raw_stamp
        #pdb.set_trace()
        raw_stamp = None
        if hasattr(pvValue, 'raw_stamp'):
            raw_stamp = pvValue.raw_stamp
        #elif hasattr( pvValue, 'timestamp' ):
        #    tsSec = pvValue.timestamp
        #    raw_stamp = ( int(tsSec), int((tsSec - int(tsSec)) * 1e9) )
        elif isinstance(pvValue, dict):
            if 'raw_stamp' in pvValue:
                raw_stamp = pvValue['raw_stamp']
            if 'timestamp' in pvValue:
                raw_stamp = pvValue['timestamp']
            if 'timeStamp' in pvValue:
                raw_stamp = pvValue['timeStamp']
        if raw_stamp is None or len(raw_stamp) != 2 or raw_stamp[0] == 0:
            # Fall back to time-of-day when no usable timestamp was found.
            if self._verbose:
                print("%s: No timestamp found. Using TOD" % pvName)
            tsSec = time.time()
            raw_stamp = (int(tsSec), int((tsSec - int(tsSec)) * 1e9))
        # NTScalar-style wrapped values save directly.
        if isinstance(pvValue, p4p.nt.scalar.ntwrappercommon):
            self.saveNtScalar(pvName, raw_stamp, pvValue)
            return cbData
        if isinstance(pvValue, p4p.wrapper.Value):
            if self._verbose:
                print('%s: ID=%s, type=%s' % (pvName, pvValue.getID(), type(pvValue)))
            pvType = pvValue.type()
            if 'timeStamp' in pvValue:
                fieldTs = (pvValue['timeStamp.secondsPastEpoch'], pvValue['timeStamp.nanoseconds'])
                if fieldTs[0]:
                    raw_stamp = fieldTs
            if pvValue.getID().startswith('epics:nt/NTTable:'):
                # Save each non-string, non-array cell keyed by row name + column.
                tableValue = pvValue['value']
                tableType = pvType['value']
                S, id, tableFields = tableType.aspy()
                assert S == 'S'
                assert id == 'structure'
                tableItems = tableValue.items()
                nCols = len(tableItems)
                nRows = len(tableItems[0][1])
                if self._verbose:
                    print("%s NTTable: nRows=%d, nCols=%d\n%s" % (pvName, nRows, nCols, tableItems))
                for row in range(nRows):
                    # Build up fullName
                    fullName = pvName
                    for col in range(nCols):
                        spec = tableFields[col][1]
                        if spec == 'as':
                            # String columns extend the name path.
                            fullName += '.' + tableItems[col][1][row]
                        elif spec != 'av' and spec != 'aU' and spec != 'aS':
                            self.saveValue(fullName + '.' + tableFields[col][0], raw_stamp, tableItems[col][1][row])
                return cbData
            # This method works for p2p/Stats and potentially other
            # simple PVStruct based PVs.
            #if pvValue.getID() == 'epics:p2p/Stats:1.0':
            for fieldName in pvValue.keys():
                pvField = pvValue[fieldName]
                if 'timeStamp' in pvField:
                    fieldTs = (pvField['timeStamp.secondsPastEpoch'], pvField['timeStamp.nanoseconds'])
                    if fieldTs[0]:
                        raw_stamp = fieldTs
                fullName = pvName + '.' + fieldName
                if isinstance(pvField, p4p.nt.scalar.ntwrappercommon):
                    cbData = self.saveNtScalar(fullName, raw_stamp, pvField['value'])
                elif pvField.getID().startswith('epics:nt/NTScalar:'):
                    cbData = self.saveValue(fullName, raw_stamp, pvField['value'])
                # TODO: Handle other nt types
        return cbData

    def saveNtScalar(self, pvName, raw_stamp, pvValue):
        """Save an NTScalar value; optionally check for missed counts."""
        if self._verbose:
            print('%s: type=%s' % (pvName, type(pvValue)))
        if self._checkPriorCount:
            # Treat the value as a counter and flag gaps vs the prior value.
            newValue = int(pvValue)
            if self._priorValue is not None:
                # Check for missed count
                expectedValue = self._priorValue + 1
                if expectedValue != newValue:
                    print('%s: missed %d counts!' % (pvName, newValue - expectedValue))
            self._priorValue = newValue
        # Save value
        self.saveValue(pvName, raw_stamp, pvValue)
        return

    def saveValue(self, pvName, raw_stamp, value):
        """Append [raw_stamp, value] to the per-PV history (scalars only)."""
        #pdb.set_trace()
        # Arrays/lists: only the first element is recorded.
        if isinstance(value, p4p.nt.scalar.ntnumericarray):
            if value.size == 0:
                return
            value = value[0]
        if isinstance(value, list):
            if len(value) == 0:
                return
            value = value[0]
        # assert pvValue.type() == Scalar:
        if pvName not in self._history:
            self._history[pvName] = []
        self._history[pvName] += [[raw_stamp, value]]
        if self._showValue:
            strTimeStamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(raw_stamp[0]))
            print('%s %s.%03d %s' % (pvName, strTimeStamp, float(raw_stamp[1]) / 1e6, float(value)))
        if self._verbose:
            print('%s: value raw_stamp = %s' % (pvName, raw_stamp))
            print('%s: Num values = %d' % (pvName, len(self._history[pvName])))

    def writeValues(self, dirName):
        """Write each PV's history to <dirName>/<pvName>.pvget as JSON-ish text."""
        if not os.path.isdir(dirName):
            os.mkdir(dirName)
        for pvName in self._history:
            saveFile = os.path.join(dirName, pvName + '.pvget')
            try:
                pvHistory = self._history[pvName]
                with open(saveFile, "w") as f:
                    if self._verbose or True:
                        print("Writing %d values to %s ..." % (len(pvHistory), saveFile))
                    # Not using json.dump so output matches similar
                    # stressTestClient pvCapture output
                    #json.dump( pvHistory, f, indent=4 )
                    #continue
                    f.write('[\n')
                    if len(pvHistory) > 1:
                        for tsVal in pvHistory[0:-1]:
                            f.write("\t[ [ %d, %d ], %d ],\n" % (tsVal[0][0], tsVal[0][1], tsVal[1]))
                    if len(pvHistory) > 0:
                        # Write last value
                        tsVal = pvHistory[-1]
                        f.write("\t[ [ %d, %d ], %d ],\n" % (tsVal[0][0], tsVal[0][1], tsVal[1]))
                    f.write(']\n')
            except BaseException as e:
                print("Error: %s" % e)
                print("Unable to write values to %s" % saveFile)

    def closeSubscription(self):
        """Stop the background loop and close any active monitor."""
        self._shutDown = True
        if self._S is not None:
            if self._verbose:
                print("Closing subscription to %s" % self._pvName)
            self._S.close()
            self._S = None

    def __exit__(self):
        self.closeSubscription()
def ts_config(connect_json, cfgtype, detname):
    """Program the XPM timing-system PVs for the selected readout groups.

    Builds a name->value dictionary from the configdb entry for `detname`
    and writes it to the master XPM's PART: PVs in one batched put.

    Returns the configuration as a JSON string.
    """
    cfg = get_config(connect_json, cfgtype, detname)
    connect_info = json.loads(connect_json)
    # get the list of readout groups that the user has selected
    # so we only configure those
    readout_groups = []
    # NOTE(review): connect_json is parsed twice (here and above) — harmless
    # but redundant.
    connect_info = json.loads(connect_json)
    for nodes in connect_info['body']['drp'].values():
        readout_groups.append(nodes['det_info']['readout'])
    readout_groups = set(readout_groups)
    control_info = connect_info['body']['control']['0']['control_info']
    xpm_master = control_info['xpm_master']
    pv_prefix = control_info['pv_base'] + ':XPM:' + str(xpm_master) + ':PART:'
    # this structure of epics variable names must mirror
    # the configdb. alternatively, we could consider
    # putting these in the configdb, perhaps as readonly fields.
    # only do a few of these for now, since Matt is switching
    # to rogue
    pvtable = {}
    mydict = {}
    for group in readout_groups:
        grp_prefix = 'group' + str(group)
        grp = cfg[grp_prefix]
        # Map user config keys to the corresponding XPM PV name suffixes.
        pvtable[grp_prefix] = {
            'trigMode': 'L0Select',
            'delay': 'L0Delay',
            'fixed': {'rate': 'L0Select_FixedRate'},
            'ac': {'rate': 'L0Select_ACRate'},
            'seq': {'mode': 'L0Select_Sequence'},
            'destination': {'select': 'DstSelect'},
        }
        epics_names_values(group, pvtable, cfg, mydict)
        # handle special cases that don't work in the "pvtable" paradigm
        # convert ac.ts0 through ac.ts5 to L0Select_ACTimeslot bitmask
        tsmask = 0
        for tsnum in range(6):
            tsval = grp['ac']['ts' + str(tsnum)]
            tsmask |= 1 << tsval
        mydict[str(group) + ':L0Select_ACTimeslot'] = tsmask
        # L0Select_SeqBit is one var used by all of seq.(burst/fixed/local)
        if grp['seq']['mode'] == 15:    # burst
            seqbit = grp['seq']['burst']['mode']
        elif grp['seq']['mode'] == 16:  # fixed rate
            seqbit = grp['seq']['fixed']['rate']
        elif grp['seq']['mode'] == 17:  # local
            seqbit = grp['seq']['local']['rate']
        else:
            raise ValueError('Illegal value for trigger sequence mode')
        mydict[str(group) + ':L0Select_SeqBit'] = seqbit
        # DstSelect_Mask should come from destination.dest0 through dest15
        dstmask = 0
        for dstnum in range(16):
            dstval = grp['destination']['dest' + str(dstnum)]
            if dstval:
                dstmask |= 1 << dstnum
        mydict[str(group) + ':DstSelect_Mask'] = dstmask
        # 4 InhEnable/InhInterval/InhLimit
        for inhnum in range(4):
            mydict[str(group) + ':InhInterval' + str(inhnum)] = grp['inhibit' + str(inhnum)]['interval']
            mydict[str(group) + ':InhLimit' + str(inhnum)] = grp['inhibit' + str(inhnum)]['limit']
            mydict[str(group) + ':InhEnable' + str(inhnum)] = grp['inhibit' + str(inhnum)]['enable']
    names = list(mydict.keys())
    values = list(mydict.values())
    names = [pv_prefix + n for n in names]
    print('TS config names and values:', names, values)
    # program the values
    ctxt = Context('pva')
    ctxt.put(names, values)
    ctxt.close()
    return json.dumps(cfg)
class CustomController:
    """ Controller class used to access process variables. Controllers are used
    for interfacing with both Channel Access and pvAccess process variables. The
    controller object is initialized using a single protocol and has methods for
    both getting and setting values on the process variables.

    Attributes:
        protocol (str): Protocol for getting values from variables ("pva" for
            pvAccess, "ca" for Channel Access)

        context (Context): P4P threaded context instance for use with pvAccess.

        set_ca (bool): Update Channel Access variable on put.

        set_pva (bool): Update pvAccess variable on put.

        pv_registry (dict): Registry mapping pvname to dict of value and pv monitor

    Example:
        ```
        # create PVAcess controller
        controller = Controller("pva")

        value = controller.get_value("scalar_input")
        image_value = controller.get_image("image_input")

        controller.close()
        ```
    """

    def __init__(self, protocol: str, prefix, track_inputs: bool = False,
                 input_pvs: list = None):
        """
        Initializes controller. Stores protocol and creates context attribute if
        using pvAccess.

        Args:
            protocol (str): Protocol for getting values from variables ("pva"
                for pvAccess, "ca" for Channel Access)

            prefix: PV name prefix; tracked inputs are f"{prefix}:{name}".

            track_inputs (bool): Record a timestamp whenever a tracked input
                PV updates.

            input_pvs (list): Names of input variables to track (may be None).
        """
        self.protocol = protocol
        self.last_update = ""
        self.pv_registry = defaultdict()
        self.track_inputs = track_inputs
        # BUG FIX: the original iterated `input_pvs` unconditionally, raising
        # TypeError when the default None was used; treat None as empty.
        self.input_pvs = [f"{prefix}:{variable}" for variable in (input_pvs or [])]
        self.prefix = prefix

        # initialize context for pva
        self.context = None
        if self.protocol == "pva":
            self.context = Context("pva")

    def ca_value_callback(self, pvname, value, *args, **kwargs):
        """Callback executed by Channel Access monitor.

        Args:
            pvname (str): Process variable name

            value (Union[np.ndarray, float]): Value to assign to process variable.
        """
        self.pv_registry[pvname]["value"] = value
        if self.track_inputs:
            if pvname in self.input_pvs:
                self.last_update = datetime.now().strftime('%m/%d/%Y, %H:%M:%S')

    def ca_connection_callback(self, *, pvname, conn, pv):
        """Callback used for monitoring connection and setting values to None
        on disconnect.
        """
        # if disconnected, set value to None
        if not conn:
            self.pv_registry[pvname]["value"] = None

    def pva_value_callback(self, pvname, value):
        """Callback executed by pvAccess monitor.

        Args:
            pvname (str): Process variable name

            value (Union[np.ndarray, float]): Value to assign to process variable.
        """
        if isinstance(value, Disconnected):
            self.pv_registry[pvname]["value"] = None
        else:
            self.pv_registry[pvname]["value"] = value
        if self.track_inputs:
            if pvname in self.input_pvs:
                self.last_update = datetime.now().strftime('%m/%d/%Y, %H:%M:%S')

    def setup_pv_monitor(self, pvname):
        """Set up process variable monitor.

        Args:
            pvname (str): Process variable name
        """
        if pvname in self.pv_registry:
            return

        if self.protocol == "ca":
            # add to registry (must exist for connection callback)
            self.pv_registry[pvname] = {"pv": None, "value": None}

            # create the pv
            pv_obj = PV(pvname, callback=self.ca_value_callback,
                        connection_callback=self.ca_connection_callback)

            # update registry
            self.pv_registry[pvname]["pv"] = pv_obj

        elif self.protocol == "pva":
            cb = partial(self.pva_value_callback, pvname)

            # populate registry s.t. initially disconnected will populate
            self.pv_registry[pvname] = {"pv": None, "value": None}

            # create the monitor obj
            mon_obj = self.context.monitor(pvname, cb, notify_disconnect=True)

            # update registry with the monitor
            self.pv_registry[pvname]["pv"] = mon_obj

    def get(self, pvname: str) -> np.ndarray:
        """
        Accesses and returns the value of a process variable.

        Args:
            pvname (str): Process variable name
        """
        self.setup_pv_monitor(pvname)
        pv = self.pv_registry.get(pvname, None)
        if pv:
            #return pv.get("value", None)
            return pv["value"]
        return None

    def get_value(self, pvname):
        """Gets scalar value of a process variable.

        Args:
            pvname (str): Image process variable name.
        """
        value = self.get(pvname)
        if value is None:
            value = DEFAULT_SCALAR_VALUE
        return value

    def get_image(self, pvname) -> dict:
        """Gets image data via controller protocol.

        Args:
            pvname (str): Image process variable name
        """
        image = None

        if self.protocol == "ca":
            image_flat = self.get(f"{pvname}:ArrayData_RBV")
            nx = self.get(f"{pvname}:ArraySizeX_RBV")
            ny = self.get(f"{pvname}:ArraySizeY_RBV")
            x = self.get(f"{pvname}:MinX_RBV")
            y = self.get(f"{pvname}:MinY_RBV")
            x_max = self.get(f"{pvname}:MaxX_RBV")
            y_max = self.get(f"{pvname}:MaxY_RBV")

            if all([image_def is not None
                    for image_def in [image_flat, nx, ny, x, y, x_max, y_max]]):
                dw = x_max - x
                dh = y_max - y
                image = image_flat.reshape(int(nx), int(ny))

        elif self.protocol == "pva":
            # context returns numpy array with WRITEABLE=False
            # copy to manipulate array below
            image = self.get(pvname)

            if image is not None:
                attrib = image.attrib
                x = attrib["x_min"]
                y = attrib["y_min"]
                dw = attrib["x_max"] - attrib["x_min"]
                dh = attrib["y_max"] - attrib["y_min"]
                image = copy.copy(image)

        if image is not None:
            return {
                "image": [image],
                "x": [x],
                "y": [y],
                "dw": [dw],
                "dh": [dh],
            }
        else:
            return DEFAULT_IMAGE_DATA

    def put(self, pvname, value: Union[np.ndarray, float], timeout=1.0) -> None:
        """Assign the value of a process variable.

        Args:
            pvname (str): Name of the process variable

            value (Union[np.ndarray, float]): Value to assign to process variable.

            timeout (float): Operation timeout in seconds
        """
        self.setup_pv_monitor(pvname)

        # allow no puts before a value has been collected
        registered = self.get(pvname)

        # if the value is registered
        if registered is not None:
            if self.protocol == "ca":
                self.pv_registry[pvname]["pv"].put(value, timeout=timeout)

            elif self.protocol == "pva":
                self.context.put(pvname, value, throw=False, timeout=timeout)

        else:
            logger.debug(f"No initial value set for {pvname}.")

    def close(self):
        if self.protocol == "pva":
            self.context.close()
def ts_config(connect_json, cfgtype, detname, detsegm):
    """Program the XPM timing-system PVs for the selected readout groups.

    Supports both Cu-linac (event-code triggering) and SC-linac (rate/seq/
    destination triggering) modes, as selected by cfg['user']['LINAC'].
    Writes all PV values in one batched put, captures the XPM firmware build
    string, and returns the reduced config (only the fields applied) as JSON.
    """
    cfg = get_config(connect_json, cfgtype, detname, detsegm)
    connect_info = json.loads(connect_json)
    # get the list of readout groups that the user has selected
    # so we only configure those
    readout_groups = []
    # NOTE(review): connect_json is parsed twice (here and above) — harmless
    # but redundant.
    connect_info = json.loads(connect_json)
    for nodes in connect_info['body']['drp'].values():
        readout_groups.append(nodes['det_info']['readout'])
    readout_groups = set(readout_groups)
    control_info = connect_info['body']['control']['0']['control_info']
    xpm_master = control_info['xpm_master']
    pv_prefix = control_info['pv_base'] + ':XPM:' + str(xpm_master) + ':PART:'
    # rcfg holds only the subset of cfg actually applied (returned to caller).
    rcfg = cfg.copy()
    rcfg['user'] = {}
    rcfg['expert'] = {}
    # linacMode 0 = Cu (event codes), otherwise SC (rates/sequences/dest).
    linacMode = cfg['user']['LINAC']
    rcfg['user']['LINAC'] = linacMode
    rcfg['user']['Cu' if linacMode == 0 else 'SC'] = {}
    pvdict = {}  # dictionary of epics pv name : value
    for group in readout_groups:
        if linacMode == 0:   # Cu
            grp_prefix = 'group' + str(group) + '_eventcode'
            eventcode = cfg['user']['Cu'][grp_prefix]
            rcfg['user']['Cu'][grp_prefix] = eventcode
            pvdict[str(group) + ':L0Select'] = 2  # eventCode
            pvdict[str(group) + ':L0Select_EventCode'] = eventcode
            pvdict[str(group) + ':DstSelect'] = 1  # DontCare
        else:                # SC
            grp_prefix = 'group' + str(group)
            grp = cfg['user']['SC'][grp_prefix]
            rcfg['user']['SC'][grp_prefix] = grp
            pvdict[str(group) + ':L0Select'] = grp['trigMode']
            pvdict[str(group) + ':L0Select_FixedRate'] = grp['fixed']['rate']
            pvdict[str(group) + ':L0Select_ACRate'] = grp['ac']['rate']
            pvdict[str(group) + ':L0Select_EventCode'] = 0  # not an option
            pvdict[str(group) + ':L0Select_Sequence'] = grp['seq']['mode']
            pvdict[str(group) + ':DstSelect'] = grp['destination']['select']
            # convert ac.ts0 through ac.ts5 to L0Select_ACTimeslot bitmask
            tsmask = 0
            for tsnum in range(6):
                tsval = grp['ac']['ts' + str(tsnum)]
                tsmask |= 1 << tsval
            pvdict[str(group) + ':L0Select_ACTimeslot'] = tsmask
            # L0Select_SeqBit is one var used by all of seq.(burst/fixed/local)
            if grp['seq']['mode'] == 15:    # burst
                seqbit = grp['seq']['burst']['mode']
            elif grp['seq']['mode'] == 16:  # fixed rate
                seqbit = grp['seq']['fixed']['rate']
            elif grp['seq']['mode'] == 17:  # local
                seqbit = grp['seq']['local']['rate']
            else:
                raise ValueError('Illegal value for trigger sequence mode')
            pvdict[str(group) + ':L0Select_SeqBit'] = seqbit
            # DstSelect_Mask should come from destination.dest0 through dest15
            dstmask = 0
            for dstnum in range(16):
                dstval = grp['destination']['dest' + str(dstnum)]
                if dstval:
                    dstmask |= 1 << dstnum
            pvdict[str(group) + ':DstSelect_Mask'] = dstmask
        # Expert inhibit settings apply in both linac modes.
        grp_prefix = 'group' + str(group)
        grp = cfg['expert'][grp_prefix]
        rcfg['expert'][grp_prefix] = grp
        # 4 InhEnable/InhInterval/InhLimit
        for inhnum in range(4):
            pvdict[str(group) + ':InhInterval' + str(inhnum)] = grp['inhibit' + str(inhnum)]['interval']
            pvdict[str(group) + ':InhLimit' + str(inhnum)] = grp['inhibit' + str(inhnum)]['limit']
            pvdict[str(group) + ':InhEnable' + str(inhnum)] = grp['inhibit' + str(inhnum)]['enable']
    names = list(pvdict.keys())
    values = list(pvdict.values())
    names = [pv_prefix + n for n in names]
    # program the values
    ctxt = Context('pva')
    ctxt.put(names, values)
    # Capture firmware version for persistence in xtc
    pv_prefix = control_info['pv_base'] + ':XPM:' + str(xpm_master) + ':'
    #rcfg['firmwareVersion'] = ctxt.get(pv_prefix+'FwVersion').raw.value
    rcfg['firmwareBuild'] = ctxt.get(pv_prefix + 'FwBuild').raw.value
    ctxt.close()
    return json.dumps(rcfg)
class ts_connector:
    """Connect-time XPM setup: enable the timing links for our readout groups.

    Parses the DAQ connect info to discover which XPM (number, port) each DRP
    node is reached through and which readout group it belongs to, then
    programs the per-port LinkGroupMask PVs on the XPMs accordingly.
    """

    def __init__(self, json_connect_info):
        self.connect_info = json.loads(json_connect_info)
        print('*** connect_info', self.connect_info)
        control_info = self.connect_info['body']['control']['0']['control_info']
        self.xpm_base = control_info['pv_base'] + ':XPM:'
        master_xpm_num = control_info['xpm_master']
        self.master_xpm_pv = self.xpm_base + str(master_xpm_num) + ':'
        self.ctxt = Context('pva')
        self.get_xpm_info()
        self.get_readout_group_mask()
        # unfortunately, the hsd needs the Rx link reset before the Tx,
        # otherwise we get CRC errors on the link.
        # try commenting this out since Matt has made the links more reliable
        #self.xpm_link_reset('Rx')
        #self.xpm_link_reset('Tx')
        # must come after clear readout because clear readout increments
        # the event counters, and the pgp eb needs them to start from zero
        # comment this out since it was moved to control.py
        #self.l0_count_reset()
        # enables listening to deadtime
        self.xpm_link_enable()
        self.ctxt.close()

    def get_readout_group_mask(self):
        """Build a bitmask of all readout groups used by the DRP nodes."""
        self.readout_group_mask = 0
        for _, _, readout_group in self.xpm_info:
            self.readout_group_mask |= (1 << readout_group)

    def get_xpm_info(self):
        """Collect (xpm_id, xpm_port, readout_group) triples from connect info."""
        self.xpm_info = []
        for key, node_info in self.connect_info['body']['drp'].items():
            try:
                # FIXME: should have a better method to map xpm ip
                # address to xpm number (used to create pv names)
                xpm_id = int(node_info['connect_info']['xpm_ip'].split('.')[2])
                xpm_port = node_info['connect_info']['xpm_port']
                readout_group = node_info['det_info']['readout']
                self.xpm_info.append((xpm_id, xpm_port, readout_group))
            except KeyError:
                # Nodes without xpm connect info are skipped.
                pass

    def xpm_link_disable_all(self):
        # FIXME: need a mechanism to disable unused links in all
        # downstream XPMs.  For now, just clear out our readout
        # groups from all the XPMs we know about from the collection,
        # which comes from the "remote link id" info in the drp nodes.
        xpms = [xpm_num for xpm_num, _, _ in self.xpm_info]
        unique_xpms = set(xpms)
        pv_names = []
        for xpm_num in unique_xpms:
            for xpm_port in range(32):
                pv_names.append(self.xpm_base + str(xpm_num) + ':' + 'LinkGroupMask' + str(xpm_port))
        current_group_masks = self.ctxt.get(pv_names)
        print(current_group_masks)
        # don't clear out group_mask 0xff (an indication that it's
        # a downstream XPM link)
        #pv_names_to_clear = [pv_name for (pv_name,group_mask) in zip(pv_names,current_group_masks) if (group_mask & self.readout_group_mask) and (group_mask != 0xff)]
        #print('*** clearing xpm links',pv_names_to_clear)
        #self.ctxt.put(pv_names_to_clear,len(pv_names_to_clear)*[0])

    def xpm_link_enable(self):
        """Set each used XPM port's LinkGroupMask to its readout-group bit."""
        self.xpm_link_disable_all()
        pv_names = []
        values = []
        for xpm_num, xpm_port, readout_group in self.xpm_info:
            pvname = self.xpm_base + str(xpm_num) + ':' + 'LinkGroupMask' + str(xpm_port)
            pv_names.append(pvname)
            values.append((1 << readout_group))
        print('*** setting xpm link enables', pv_names, values)
        self.ctxt.put(pv_names, values)

    def xpm_link_reset(self, style):
        """Pulse the Rx or Tx link reset on every used XPM port.

        Args:
            style: 'Rx' or 'Tx' — selects which side of the link to reset.
        """
        # make pv name that looks like DAQ:LAB2:XPM:1:RxLinkReset11
        # for xpm_num 1 and xpm_port 11
        pv_names = []
        for xpm_num, xpm_port, _ in self.xpm_info:
            pvname = self.xpm_base + str(xpm_num) + ':' + style + 'LinkReset' + str(xpm_port)
            pv_names.append(pvname)
        print('*** xpm link resetting', pv_names)
        self.ctxt.put(pv_names, len(pv_names) * [1])
        # unfortunately need to wait for the links to relock, which
        # matt says takes "an appreciable fraction of a second".
        # empirically, the links seem unreliable unless we wait 2s.
        time.sleep(2)

    def l0_count_reset(self):
        """Reset the master XPM's L0 counters for our readout groups."""
        pvL0Reset = self.master_xpm_pv + 'GroupL0Reset'
        print('*** resetting l0 count', self.readout_group_mask)
        self.ctxt.put(pvL0Reset, self.readout_group_mask)
def wave8_config(prefix,connect_str,cfgtype,detname,detsegm,group):
    """Configure a Wave8 detector via pvAccess and return the applied
    configuration as a JSON string.

    Args:
        prefix: EPICS PV prefix of the Wave8 (":Top:" is appended).
        connect_str: DAQ connection-info JSON passed to get_config().
        cfgtype/detname/detsegm: configdb lookup keys.
        group: readout group (partition) number.

    Raises:
        ValueError: if any user timing value is out of range.
    """
    global ctxt
    cfg = get_config(connect_str,cfgtype,detname,detsegm)

    ctxt = Context('pva')

    #  Timing relationships (in order of occurrence):
    #                 | timing fiducial
    #  PartitionDelay | TriggerEventManager.TriggerEventBuffer receives xpm trigger
    #  TriggerDelay   | TriggerEventManager.triggerBus asserts trigger
    #  IntStart       | Integrators.intStart (baseline latched)
    #  IntLen         | Intregrators.intEnd
    #                 | RawDataBuffer start
    #  RawBuffLen     | RawDataBuffer End

    epics_prefix = prefix + ':Top:'
    partitionDelay = ctxt.get(epics_prefix+'TriggerEventManager:XpmMessageAligner:PartitionDelay[%d]'%group)
    raw = cfg['user']['raw']
    rawStart = raw['start_ns']
    # convert ns to 185.7 MHz (1300/7) timing clocks, minus the xpm delay
    triggerDelay = rawStart*1300/7000 - partitionDelay*200
    print('partitionDelay {:} rawStart {:} triggerDelay {:}'.format(partitionDelay,rawStart,triggerDelay))
    if triggerDelay < 0:
        raise ValueError('triggerDelay computes to < 0')

    # raw samples run at 250 MHz (4 ns/sample); buffer limit is 256 samples
    rawNsamples = int(raw['gate_ns']*0.25)
    if rawNsamples>256:
        raise ValueError('raw.gate_ns > 1020')
    raw['nsamples'] = rawNsamples

    fex = cfg['user']['fex']
    intStart = fex['start_ns']
    if intStart < rawStart:
        print('fex.start_ns {:} raw.start_ns {:}'.format(intStart,rawStart))
        raise ValueError('fex.start_ns < raw.start_ns')
    # integrator delay relative to the raw window, in 250 MHz clocks (8-bit register)
    fexTrigDelay = int((intStart-rawStart)*250/1000)
    if fexTrigDelay > 255:
        raise ValueError('fex.start_ns > raw.start_ns + 1020')

    fexNsamples = int(fex['gate_ns']*0.25)
    if fexNsamples>255:
        raise ValueError('fex.gate_ns > 1020')
    # FIX: record the fex sample count (was erroneously set to rawNsamples)
    fex['nsamples'] = fexNsamples

    # Assert clears
    names_clr = [ epics_prefix + 'BatcherEventBuilder:Blowoff',
                  epics_prefix + 'TimingFrameRx:RxCountReset',
                  epics_prefix + 'RawBuffers:CntRst',
                  epics_prefix + 'Integrators:CntRst' ]
    values = [1] * len(names_clr)
    print('names {:}'.format(names_clr))
    ctxt.put(names_clr, values)

    expert = cfg['expert']['Top']
    expert['TriggerEventManager']['TriggerEventBuffer[0]']['TriggerDelay'] = triggerDelay
    for i in range(8):
        expert['RawBuffers']['BuffEn[%d]'%i] = raw['enable[%d]'%i]
    # Firmware needs a value one less
    expert['RawBuffers']['BuffLen'] = rawNsamples-1
    # Firmware needs a value one less
    prescale = raw['prescale']
    if prescale>0:
        prescale -= 1
    expert['RawBuffers']['TrigPrescale'] = prescale

    expert['Integrators']['TrigDelay'] = fexTrigDelay
    # Firmware needs a value one less
    expert['Integrators']['IntegralSize'] = fexNsamples-1
    expert['Integrators']['BaselineSize'] = fex['baseline']
    for i in range(4):
        expert['Integrators']['CorrCoefficientFloat64[%d]'%i] = fex['coeff[%d]'%i]

    expert['TriggerEventManager']['TriggerEventBuffer[0]']['Partition'] = group

    # flatten the expert dict into parallel (pv name, value) lists and program them
    names = []
    values = []
    epics_put(cfg['expert'],prefix+':',names,values)
    ctxt.put(names,values)

    ctxt.put(epics_prefix+'TriggerEventManager:TriggerEventBuffer[0]:MasterEnable', 1, wait=True)
    time.sleep(0.2)

    # Deassert clears
    values = [0]*len(names_clr)
    ctxt.put(names_clr,values)
    ctxt.put(epics_prefix+'BatcherEventBuilder:Blowoff', 0, wait=True)

    # persist firmware identification in the returned configuration
    cfg['firmwareVersion'] = ctxt.get(epics_prefix+'AxiVersion:FpgaVersion').raw.value
    cfg['firmwareBuild' ] = ctxt.get(epics_prefix+'AxiVersion:BuildStamp').raw.value

    ctxt.close()

    v = json.dumps(cfg)
    return v
def hsd_config(connect_str, epics_prefix, cfgtype, detname, detsegm, group):
    """Configure an HSD digitizer through its :CONFIG PV and return the
    applied configuration as a JSON string.

    Args:
        connect_str: DAQ connection-info JSON passed to get_config().
        epics_prefix: base PV name of the HSD.
        cfgtype/detname/detsegm: configdb lookup keys.
        group: readout group (partition) number.

    Raises:
        ValueError: if a user timing value is out of range.
        Exception: if the HSD does not report READY within ~10 s.
    """
    cfg = get_config(connect_str, cfgtype, detname, detsegm)

    # fetch the current configuration for defaults not specified in the configuration
    ctxt = Context('pva')
    values = ctxt.get(epics_prefix + ':CONFIG')

    # fetch the xpm delay
    partitionDelay = ctxt.get(epics_prefix + ':MONTIMING').msgdelayset
    print('partitionDelay {:}'.format(partitionDelay))

    #
    #  Validate user raw values
    #
    raw = cfg['user']['raw']
    # FIX: truncate to an integer clock count (fex_start below — and the
    # later revision of this function — already do; the register field
    # expects an integer, not a float)
    raw_start = int((raw['start_ns'] * 1300 / 7000 - partitionDelay * 200) *
                    160 / 200)  # in "160MHz"(*13/14) clks
    if raw_start < 0:
        print('partitionDelay {:} raw_start_ns {:} raw_start {:}'.format(
            partitionDelay, raw['start_ns'], raw_start))
        raise ValueError('raw_start computes to < 0')

    raw_gate = int(raw['gate_ns'] * 0.160 * 13 / 14)  # in "160" MHz clks
    raw_nsamples = raw_gate * 40
    if raw_gate < 0:
        raise ValueError('raw_gate computes to < 0')
    if raw_gate > 4000:
        raise ValueError('raw_gate computes to > 4000; raw_nsamples > 160000')

    #
    #  Validate user fex values
    #
    fex = cfg['user']['fex']
    fex_start = int((fex['start_ns'] * 1300 / 7000 - partitionDelay * 200) *
                    160 / 200)  # in "160MHz"(*13/14) clks
    if fex_start < 0:
        print('partitionDelay {:} fex_start_ns {:} fex_start {:}'.format(
            partitionDelay, fex['start_ns'], fex_start))
        raise ValueError('fex_start computes to < 0')
    fex_gate = int(fex['gate_ns'] * 0.160 * 13 / 14)  # in "160" MHz clks
    fex_nsamples = fex_gate * 40
    if fex_gate < 0:
        raise ValueError('fex_gate computes to < 0')
    # Place no constraint on upper bound.  Assumes sparsification will
    # reduce to < 160000 recorded samples

    # hsd_thr_ilv_native_fine firmware expects xpre,xpost in # of super samples (4 samples)
    fex_xpre = int((fex['xpre'] + 3) / 4)
    fex_xpost = int((fex['xpost'] + 3) / 4)

    # overwrite expert fields from user input
    expert = cfg['expert']
    expert['readoutGroup'] = group
    expert['enable'] = 1
    expert['raw_start'] = raw_start
    expert['raw_gate'] = raw_gate
    expert['raw_prescale'] = raw['prescale']
    expert['fex_start'] = fex_start
    expert['fex_gate'] = fex_gate
    expert['fex_xpre'] = fex_xpre
    expert['fex_xpost'] = fex_xpost
    expert['fex_prescale'] = fex['prescale']

    # program the values
    print(epics_prefix)
    ctxt.put(epics_prefix + ':READY', 0, wait=True)
    ctxt.put(epics_prefix + ':CONFIG', expert, wait=True)

    # the completion of the "put" guarantees that all of the above
    # have completed (although in no particular order)
    complete = False
    for i in range(100):
        complete = ctxt.get(epics_prefix + ':READY') != 0
        if complete:
            break
        print('hsd config wait for complete', i)
        time.sleep(0.1)

    if complete:
        print('hsd config complete')
    else:
        raise Exception('timed out waiting for hsd configure')

    # persist firmware identification in the returned configuration
    cfg['firmwareVersion'] = ctxt.get(epics_prefix + ':FWVERSION').raw.value
    cfg['firmwareBuild'] = ctxt.get(epics_prefix + ':FWBUILD').raw.value

    ctxt.close()

    return json.dumps(cfg)
def apply_config(cfg):
    """Program the timing-system (XPM partition) PVs from a full
    configuration dict and return the applied config as a JSON string.

    Builds a pv-name -> value dict per readout group (Cu eventcode mode or
    SC trigger mode), writes all PVs in one put, and records the firmware
    build string.

    Args:
        cfg: full configuration dict with 'user' and 'expert' sections.

    Raises:
        ValueError: on an illegal trigger sequence mode.
    """
    global pv_prefix
    # start from a shallow copy, then rebuild 'user'/'expert' with only
    # the entries actually applied
    # (removed a dead "rcfg = {}" that was immediately overwritten)
    rcfg = cfg.copy()
    rcfg['user'] = {}
    rcfg['expert'] = {}
    linacMode = cfg['user']['LINAC']
    rcfg['user']['LINAC'] = linacMode
    rcfg['user']['Cu' if linacMode == 0 else 'SC'] = {}

    pvdict = {}  # dictionary of epics pv name : value
    for group in readout_groups:
        if linacMode == 0:  # Cu
            grp_prefix = 'group' + str(group) + '_eventcode'
            eventcode = cfg['user']['Cu'][grp_prefix]
            rcfg['user']['Cu'][grp_prefix] = eventcode
            pvdict[str(group) + ':L0Select'] = 2  # eventCode
            pvdict[str(group) + ':L0Select_EventCode'] = eventcode
            pvdict[str(group) + ':DstSelect'] = 1  # DontCare
        else:  # SC
            grp_prefix = 'group' + str(group)
            grp = cfg['user']['SC'][grp_prefix]
            rcfg['user']['SC'][grp_prefix] = grp
            pvdict[str(group) + ':L0Select'] = grp['trigMode']
            pvdict[str(group) + ':L0Select_FixedRate'] = grp['fixed']['rate']
            pvdict[str(group) + ':L0Select_ACRate'] = grp['ac']['rate']
            pvdict[str(group) + ':L0Select_EventCode'] = 0  # not an option
            pvdict[str(group) + ':L0Select_Sequence'] = grp['seq']['mode']
            pvdict[str(group) + ':DstSelect'] = grp['destination']['select']

            # convert ac.ts0 through ac.ts5 to L0Select_ACTimeslot bitmask
            tsmask = 0
            for tsnum in range(6):
                tsval = grp['ac']['ts' + str(tsnum)]
                tsmask |= 1 << tsval
            pvdict[str(group) + ':L0Select_ACTimeslot'] = tsmask

            # L0Select_SeqBit is one var used by all of seq.(burst/fixed/local)
            if grp['seq']['mode'] == 15:  # burst
                seqbit = grp['seq']['burst']['mode']
            elif grp['seq']['mode'] == 16:  # fixed rate
                seqbit = grp['seq']['fixed']['rate']
            elif grp['seq']['mode'] == 17:  # local
                seqbit = grp['seq']['local']['rate']
            else:
                raise ValueError('Illegal value for trigger sequence mode')
            pvdict[str(group) + ':L0Select_SeqBit'] = seqbit

            # DstSelect_Mask should come from destination.dest0 through dest15
            dstmask = 0
            for dstnum in range(16):
                dstval = grp['destination']['dest' + str(dstnum)]
                if dstval:
                    dstmask |= 1 << dstnum
            pvdict[str(group) + ':DstSelect_Mask'] = dstmask

        # expert settings are applied in both Cu and SC modes
        grp_prefix = 'group' + str(group)
        grp = cfg['expert'][grp_prefix]
        rcfg['expert'][grp_prefix] = grp
        # 4 InhEnable/InhInterval/InhLimit
        for inhnum in range(4):
            pvdict[str(group) + ':InhInterval' + str(inhnum)] = grp['inhibit' + str(inhnum)]['interval']
            pvdict[str(group) + ':InhLimit' + str(inhnum)] = grp['inhibit' + str(inhnum)]['limit']
            pvdict[str(group) + ':InhEnable' + str(inhnum)] = grp['inhibit' + str(inhnum)]['enable']

    names = list(pvdict.keys())
    values = list(pvdict.values())
    names = [pv_prefix + 'PART:' + n for n in names]

    # program the values
    ctxt = Context('pva')
    ctxt.put(names, values)

    # Capture firmware version for persistence in xtc
    #rcfg['firmwareVersion'] = ctxt.get(pv_prefix+'FwVersion').raw.value
    rcfg['firmwareBuild'] = ctxt.get(pv_prefix + 'FwBuild').raw.value

    ctxt.close()

    return json.dumps(rcfg)
def apply_update(cfg):
    """Push a partial (scan) update of the timing-system configuration.

    Only keys present in ``cfg`` are applied; returns the applied subset
    as a JSON string.

    NOTE(review): relies on module globals ``pv_prefix``, ``ocfg`` and
    ``readout_groups`` — confirm they are set before calling.
    """
    global pv_prefix
    rcfg = {}
    pvdict = {}  # dictionary of epics pv name : value
    for key in cfg:
        if key == 'user':
            rcfg['user'] = {}
            linacMode = ocfg['user']['LINAC']  # this won't scan
            # NOTE(review): 'full' is not defined in this function;
            # presumably a module global or leftover from a variant with a
            # 'full' parameter — confirm.
            if full:
                rcfg['user']['LINAC'] = linacMode
            rcfg['user']['Cu' if linacMode == 0 else 'SC'] = {}
            for group in readout_groups:
                if linacMode == 0:  # Cu
                    try:
                        grp_prefix = 'group' + str(group) + '_eventcode'
                        eventcode = cfg['user']['Cu'][grp_prefix]
                        rcfg['user']['Cu'][grp_prefix] = eventcode
                        pvdict[str(group) + ':L0Select'] = 2  # eventCode
                        pvdict[str(group) + ':L0Select_EventCode'] = eventcode
                        pvdict[str(group) + ':DstSelect'] = 1  # DontCare
                    except KeyError:
                        pass
                else:  # SC
                    pass  # nothing here to scan (too complicated to implement)
        if key == 'expert':
            rcfg['expert'] = {}
            for group in readout_groups:
                grp_prefix = 'group' + str(group)
                if grp_prefix in cfg['expert']:
                    grp = cfg['expert'][grp_prefix]
                    rcfg['expert'][grp_prefix] = {}
                    # 4 InhEnable/InhInterval/InhLimit
                    for inhnum in range(4):
                        inhkey = 'inhibit' + str(inhnum)
                        if inhkey in grp:
                            inhgrp = grp[inhkey]
                            rcfg['expert'][grp_prefix][inhkey] = inhgrp
                            # NOTE(review): rgrp is assigned but never used
                            rgrp = rcfg['expert'][grp_prefix][inhkey]
                            if 'interval' in inhgrp:
                                pvdict[str(group) + ':InhInterval' + str(inhnum)] = inhgrp['interval']
                            if 'limit' in inhgrp:
                                pvdict[str(group) + ':InhLimit' + str(inhnum)] = inhgrp['limit']
                            if 'enable' in inhgrp:
                                pvdict[str(group) + ':InhEnable' + str(inhnum)] = inhgrp['enable']
        else:
            # NOTE(review): this 'else' pairs with "if key == 'expert':",
            # so for key == 'user' it overwrites the rcfg['user'] built
            # above with the raw cfg['user'] — looks like it was meant to
            # be "elif key == 'expert': ... else:". Confirm before changing.
            rcfg[key] = cfg[key]

    names = list(pvdict.keys())
    values = list(pvdict.values())
    names = [pv_prefix + 'PART:' + n for n in names]

    # program the values
    ctxt = Context('pva')
    ctxt.put(names, values)

    ctxt.close()

    return json.dumps(rcfg)
def hsd_config(connect_str, epics_prefix, cfgtype, detname, group):
    """Program an HSD's :CONFIG PV from configdb values and return the
    configuration as a JSON string.

    Raises Exception if the HSD does not report READY within ~10 s.
    """
    cfg = get_config(connect_str, cfgtype, detname)

    # this structure of epics variable names must mirror
    # the configdb. alternatively, we could consider
    # putting these in the configdb, perhaps as readonly fields.
    pvtable = {
        'enable': 'enable',
        'raw': {
            'start': 'raw_start',
            'gate': 'raw_gate',
            'prescale': 'raw_prescale'
        },
        'fex': {
            'start': 'fex_start',
            'gate': 'fex_gate',
            'prescale': 'fex_prescale',
            'ymin': 'fex_ymin',
            'ymax': 'fex_ymax',
            'xpre': 'fex_xpre',
            'xpost': 'fex_xpost'
        },
        'expert': {
            'datamode': 'test_pattern',
            'fullthresh': 'full_event',
            'fullsize': 'full_size',
            'fsrange': 'fs_range_vpp'
        },
    }

    # walk the cfg dictionary and pull out every value whose key path
    # matches an entry in pvtable
    pv_values = {}
    epics_names_values(pvtable, cfg, pv_values)
    pv_values['readoutGroup'] = group

    # program the values
    ctxt = Context('pva')
    print(epics_prefix)
    ctxt.put(epics_prefix + ':READY', 0, wait=True)
    print(pv_values)
    ctxt.put(epics_prefix + ':CONFIG', pv_values, wait=True)

    # the completion of the "put" guarantees that all of the above
    # have completed (although in no particular order)
    complete = False
    attempt = 0
    while attempt < 100:
        complete = ctxt.get(epics_prefix + ':READY') != 0
        if complete:
            break
        print('hsd config wait for complete', attempt)
        time.sleep(0.1)
        attempt += 1

    if not complete:
        raise Exception('timed out waiting for hsd configure')
    print('hsd config complete')

    ctxt.close()
    return json.dumps(cfg)
class ts_connector:
    """Enable XPM timing links for the readout groups found in the DAQ
    connection info, recursively disabling our groups on downstream XPMs
    first.

    NOTE(review): relies on module-level ``json``, ``time``, ``pprint``,
    p4p's ``Context`` and an ``xpm_link`` helper class defined elsewhere.
    """

    def __init__(self, json_connect_info):
        # json_connect_info: JSON string from the DAQ collection; expected
        # to contain body.control.0.control_info and body.drp.* entries.
        self.connect_info = json.loads(json_connect_info)
        print('*** connect_info')
        pp = pprint.PrettyPrinter()
        pp.pprint(self.connect_info)
        control_info = self.connect_info['body']['control']['0'][
            'control_info']
        # PV name roots, e.g. "DAQ:LAB2:XPM:" and "DAQ:LAB2:XPM:<n>:"
        self.xpm_base = control_info['pv_base'] + ':XPM:'
        master_xpm_num = control_info['xpm_master']
        self.master_xpm_pv = self.xpm_base + str(master_xpm_num) + ':'
        self.ctxt = Context('pva')
        self.get_xpm_info()
        self.get_readout_group_mask()
        # unfortunately, the hsd needs the Rx link reset before the Tx,
        # otherwise we get CRC errors on the link.
        # try commenting this out since Matt has made the links more reliable
        #self.xpm_link_reset('Rx')
        #self.xpm_link_reset('Tx')
        # must come after clear readout because clear readout increments
        # the event counters, and the pgp eb needs them to start from zero
        # comment this out since it was moved to control.py
        #self.l0_count_reset()
        # enables listening to deadtime
        self.xpm_link_enable()
        self.ctxt.close()

    def get_readout_group_mask(self):
        """Build a bitmask with one bit set per readout group in use."""
        self.readout_group_mask = 0
        for _, _, readout_group in self.xpm_info:
            self.readout_group_mask |= (1 << readout_group)

    def get_xpm_info(self):
        """Collect (xpm_id, xpm_port, readout_group) for every DRP node.

        Nodes missing any of the expected keys are silently skipped.
        """
        self.xpm_info = []
        # FIXME: cpo/weaver think this doesn't work for digitizers,
        # for example, where the DRP node can't learn which XPM port
        # is feeding it timing information. Currently think we should
        # try to get the information from the XPM side, instead of the
        # drp side.
        for key, node_info in self.connect_info['body']['drp'].items():
            try:
                # FIXME: should have a better method to map xpm ip
                # address to xpm number (used to create pv names)
                xpm_id = int(node_info['connect_info']['xpm_id'])
                xpm_port = node_info['connect_info']['xpm_port']
                readout_group = node_info['det_info']['readout']
                self.xpm_info.append((xpm_id, xpm_port, readout_group))
            except KeyError:
                pass

    def xpm_link_disable(self, pv, groups):
        """Recursively clear ``groups`` from the link masks of the XPM at
        PV base ``pv`` and of every XPM downstream of it.

        Downstream XPMs are identified from the RemoteLinkId PVs (14 ports
        per XPM); xpm-to-xpm links keep mask 0xff.
        """
        pv_names = []
        for xpm_port in range(14):
            pv_names.append(pv + 'RemoteLinkId' + str(xpm_port))
        print('link_ids: {:}'.format(pv_names))
        link_ids = self.ctxt.get(pv_names)
        pv_names = []
        downstream_xpm_names = []
        for xpm_port in range(14):
            pv_names.append(pv + 'LinkGroupMask' + str(xpm_port))
        link_masks = self.ctxt.get(pv_names)
        for i in range(14):
            xlink = xpm_link(link_ids[i])
            # this gets run for all xpm's "downstream" of the master xpm
            if xlink.is_xpm():
                downstream_xpm_names.append(self.xpm_base +
                                            str(xlink.xpm_num()))
                self.xpm_link_disable(
                    self.xpm_base + str(xlink.xpm_num()) + ':', groups)
                link_masks[
                    i] = 0xff  # xpm to xpm links should be enabled for everything
            else:
                link_masks[i] &= ~groups
        self.ctxt.put(pv_names, link_masks)
        # this code disables the "master" feature for each of the
        # downstream xpm's for the readout groups used by the new xpm master
        pv_names_downstream_xpm_master_enable = []
        for name in downstream_xpm_names:
            for igroup in range(8):
                if (1 << igroup) & groups:
                    pv_names_downstream_xpm_master_enable.append(
                        name + ':PART:%d:Master' % igroup)
        num_master_disable = len(pv_names_downstream_xpm_master_enable)
        if (num_master_disable):
            print('*** Disable downstream xpm readout group master:',
                  pv_names_downstream_xpm_master_enable)
            self.ctxt.put(pv_names_downstream_xpm_master_enable,
                          [0] * num_master_disable)

    def xpm_link_disable_all(self):
        # Start from the master and recursively remove the groups from each downstream link
        self.xpm_link_disable(self.master_xpm_pv, self.readout_group_mask)

    def xpm_link_enable(self):
        """Enable our readout-group bits on each (xpm, port) in use,
        OR-ing groups together when several share a port."""
        self.xpm_link_disable_all()
        d = {}
        for xpm_num, xpm_port, readout_group in self.xpm_info:
            pvname = self.xpm_base + str(
                xpm_num) + ':' + 'LinkGroupMask' + str(xpm_port)
            if pvname in d:
                d[pvname] |= (1 << readout_group)
            else:
                d[pvname] = (1 << readout_group)
        pv_names = []
        values = []
        for name, value in d.items():
            pv_names.append(name)
            values.append(value)
        print('*** setting xpm link enables', pv_names, values)
        self.ctxt.put(pv_names, values)

    def xpm_link_reset(self, style):
        """Pulse the Rx or Tx link-reset PV for every link in use.

        style: 'Rx' or 'Tx' (selects the PV name).
        """
        # make pv name that looks like DAQ:LAB2:XPM:1:RxLinkReset11
        # for xpm_num 1 and xpm_port 11
        pv_names = []
        for xpm_num, xpm_port, _ in self.xpm_info:
            pvname = self.xpm_base + str(
                xpm_num) + ':' + style + 'LinkReset' + str(xpm_port)
            pv_names.append(pvname)
        print('*** xpm link resetting', pv_names)
        self.ctxt.put(pv_names, len(pv_names) * [1])
        # unfortunately need to wait for the links to relock, which
        # matt says takes "an appreciable fraction of a second".
        # empirically, the links seem unreliable unless we wait 2s.
        time.sleep(2)

    def l0_count_reset(self):
        """Reset L0 trigger counters for our readout groups on the master XPM."""
        pvL0Reset = self.master_xpm_pv + 'GroupL0Reset'
        print('*** resetting l0 count', self.readout_group_mask)
        self.ctxt.put(pvL0Reset, self.readout_group_mask)
class P4PProvider(QObject):
    """pvAccess (p4p) image provider that marshals monitor callbacks onto
    the Qt thread via a signal, then hands image data to a viewer callback.

    The p4p monitor thread blocks on callbackDoneEvent until the Qt-side
    handler has consumed the update, providing back-pressure.
    """
    callbacksignal = pyqtSignal()

    def __init__(self):
        QObject.__init__(self)
        # deliver p4p callbacks on the Qt event loop
        self.callbacksignal.connect(self.mycallback)
        self.callbackDoneEvent = Event()
        self.firstCallback = True
        self.isClosed = True
        # default channel; override with setChannelName()
        self.channelName = "13SIM1:Pva1:Image"

    def setChannelName(self, channelName):
        self.channelName = channelName

    def getChannelName(self):
        return self.channelName

    def start(self):
        """Open the pva context and start monitoring the image channel."""
        self.ctxt = Context("pva")
        self.firstCallback = True
        self.isClosed = False
        self.subscription = self.ctxt.monitor(
            self.getChannelName(),
            self.p4pcallback,
            request=
            "field(value,dimension,codec,compressedSize,uncompressedSize)",
            notify_disconnect=True,
        )

    def stop(self):
        """Stop delivering callbacks and close the pva context."""
        self.isClosed = True
        self.ctxt.close()

    def callback(self, arg):
        # forward to the viewer; NTNDA_Viewer is expected to be set by
        # the owner of this provider
        self.NTNDA_Viewer.callback(arg)

    def p4pcallback(self, arg):
        """Runs on the p4p monitor thread: stash the update, signal the Qt
        thread, and block until it has been processed."""
        if self.isClosed:
            return
        self.struct = arg
        self.callbacksignal.emit()
        self.callbackDoneEvent.wait()
        self.callbackDoneEvent.clear()

    def mycallback(self):
        """Runs on the Qt thread: translate the stashed p4p structure into
        status/data dicts for the viewer and release the monitor thread."""
        struct = self.struct
        arg = dict()
        try:
            argtype = str(type(struct))
            # a Disconnected sentinel is delivered on connection loss
            if argtype.find("Disconnected") >= 0:
                arg["status"] = "disconnected"
                self.callback(arg)
                self.firstCallback = True
                self.callbackDoneEvent.set()
                return
            if self.firstCallback:
                arg = dict()
                arg["status"] = "connected"
                self.callback(arg)
                self.firstCallback = False
                # NOTE(review): this second call re-delivers the same
                # "connected" dict — looks like a duplicate/stray call;
                # confirm against the viewer's expectations.
                self.callback(arg)
            arg = dict()
            arg["value"] = struct["value"]
            arg["dimension"] = struct["dimension"]
            arg["codec"] = struct["codec"]
            arg["compressedSize"] = struct["compressedSize"]
            arg["uncompressedSize"] = struct["uncompressedSize"]
            self.callback(arg)
            self.callbackDoneEvent.set()
            return
        except Exception as error:
            # report the failure to the viewer rather than raising on the
            # Qt thread, and always release the waiting monitor thread
            arg["exception"] = repr(error)
            self.callback(arg)
            self.callbackDoneEvent.set()
            return
def hsd_config(connect_str, prefix, cfgtype, detname, detsegm, group):
    """Configure an HSD digitizer and return the applied configuration as
    a JSON string.

    Validates user raw/fex timing values against register widths, merges
    them into the expert section, programs the PVs (via the module's
    apply_config helper) and records the firmware version/build.

    Raises:
        ValueError: if a user timing value is out of range.
    """
    global partitionDelay
    global epics_prefix
    global ocfg
    epics_prefix = prefix

    cfg = get_config(connect_str, cfgtype, detname, detsegm)

    # fetch the current configuration for defaults not specified in the configuration
    ctxt = Context('pva')
    values = ctxt.get(epics_prefix + ':CONFIG')

    # fetch the xpm delay
    partitionDelay = ctxt.get(epics_prefix + ':MONTIMING').msgdelayset
    print('partitionDelay {:}'.format(partitionDelay))

    #
    #  Validate user raw values
    #
    raw = cfg['user']['raw']
    raw_start = int((raw['start_ns'] * 1300 / 7000 - partitionDelay * 200) *
                    160 / 200)  # in "160MHz"(*13/14) clks
    # raw_start register is 14 bits
    if raw_start < 0:
        print('partitionDelay {:} raw_start_ns {:} raw_start {:}'.format(
            partitionDelay, raw['start_ns'], raw_start))
        raise ValueError('raw_start is too small by {:} ns'.format(
            -raw_start / 0.16 * 14. / 13))
    if raw_start > 0x3fff:
        print('partitionDelay {:} raw_start_ns {:} raw_start {:}'.format(
            partitionDelay, raw['start_ns'], raw_start))
        raise ValueError('start_ns is too large by {:} ns'.format(
            (raw_start - 0x3fff) / 0.16 * 14. / 13))

    raw_gate = int(raw['gate_ns'] * 0.160 * 13 / 14)  # in "160" MHz clks
    raw_nsamples = raw_gate * 40
    # raw_gate register is 14 bits
    if raw_gate < 0:
        raise ValueError('raw_gate computes to < 0')
    if raw_gate > 4000:
        raise ValueError('raw_gate computes to > 4000; raw_nsamples > 160000')

    #
    #  Validate user fex values
    #
    fex = cfg['user']['fex']
    fex_start = int((fex['start_ns'] * 1300 / 7000 - partitionDelay * 200) *
                    160 / 200)  # in "160MHz"(*13/14) clks
    if fex_start < 0:
        print('partitionDelay {:} fex_start_ns {:} fex_start {:}'.format(
            partitionDelay, fex['start_ns'], fex_start))
        raise ValueError('fex_start computes to < 0')
    fex_gate = int(fex['gate_ns'] * 0.160 * 13 / 14)  # in "160" MHz clks
    fex_nsamples = fex_gate * 40
    if fex_gate < 0:
        raise ValueError('fex_gate computes to < 0')
    # Place no constraint on upper bound.  Assumes sparsification will
    # reduce to < 160000 recorded samples

    # hsd_thr_ilv_native_fine firmware expects xpre,xpost in # of super samples (4 samples)
    fex_xpre = int((fex['xpre'] + 3) / 4)
    fex_xpost = int((fex['xpost'] + 3) / 4)

    # overwrite expert fields from user input
    expert = cfg['expert']
    expert['readoutGroup'] = group
    expert['enable'] = 1
    expert['raw_start'] = raw_start
    expert['raw_gate'] = raw_gate
    expert['raw_prescale'] = raw['prescale']
    expert['fex_start'] = fex_start
    expert['fex_gate'] = fex_gate
    expert['fex_xpre'] = fex_xpre
    expert['fex_xpost'] = fex_xpost
    expert['fex_ymin'] = fex['ymin']
    expert['fex_ymax'] = fex['ymax']
    expert['fex_prescale'] = fex['prescale']

    # program the values
    # NOTE(review): apply_config here takes (ctxt, cfg) — a different
    # helper than the apply_config(cfg) defined elsewhere in this file.
    apply_config(ctxt, cfg)

    # persist firmware identification in the returned configuration
    fwver = ctxt.get(epics_prefix + ':FWVERSION').value
    fwbld = ctxt.get(epics_prefix + ':FWBUILD').value
    cfg['firmwareVersion'] = fwver
    cfg['firmwareBuild'] = fwbld
    print(f'fwver: {fwver}')
    print(f'fwbld: {fwbld}')

    ctxt.close()

    # cache the applied configuration for later scan/update calls
    ocfg = cfg
    return json.dumps(cfg)
class Controller:
    """
    Controller class used to access process variables. Controllers are used for
    interfacing with both Channel Access and pvAccess process variables. The
    controller object is initialized using a single protocol has methods for
    both getting and setting values on the process variables.

    Attributes:
        _protocol (str): Protocol for getting values from variables ("pva" for
            pvAccess, "ca" for Channel Access)

        _context (Context): P4P threaded context instance for use with pvAccess.

        _pv_registry (dict): Registry mapping pvname to dict of value and pv monitor

        _input_pvs (dict): Dictionary of input process variables

        _output_pvs (dict): Dictionary out output process variables

        _prefix (str): Prefix to use for accessing variables

        last_input_update (datetime): Last update of input variables

        last_output_update (datetime): Last update of output variables

    Example:
        ```
        # create PVAcess controller
        controller = Controller("pva")

        value = controller.get_value("scalar_input")
        image_value = controller.get_image("image_input")

        controller.close()
        ```

    """

    def __init__(self, protocol: str, input_pvs: dict, output_pvs: dict,
                 prefix):
        """
        Initializes controller. Stores protocol and creates context attribute if
        using pvAccess.

        Args:
            protocol (str): Protocol for getting values from variables ("pva"
                for pvAccess, "ca" for Channel Access)

            input_pvs (dict): Dict mapping input variable names to variable

            output_pvs (dict): Dict mapping output variable names to variable

            prefix: PV name prefix prepended as f"{prefix}:{name}"

        """
        self._protocol = protocol
        self._pv_registry = defaultdict()
        self._input_pvs = input_pvs
        self._output_pvs = output_pvs
        self._prefix = prefix
        self.last_input_update = ""
        self.last_output_update = ""

        # initalize context for pva
        self._context = None
        if self._protocol == "pva":
            self._context = Context("pva")

        # initialize controller by setting up a monitor per variable
        for variable in {**input_pvs, **output_pvs}.values():
            if variable.variable_type == "image":
                self.get_image(variable.name)

            elif variable.variable_type == "array":
                self.get_array(variable.name)

            else:
                self.get_value(variable.name)

    def _ca_value_callback(self, pvname, value, *args, **kwargs):
        """Callback executed by Channel Access monitor.

        Args:
            pvname (str): Process variable name

            value (Union[np.ndarray, float]): Value to assign to process variable.
        """
        pvname = pvname.replace(f"{self._prefix}:", "")
        self._pv_registry[pvname]["value"] = value

        # track freshness separately for inputs and outputs
        if pvname in self._input_pvs:
            self.last_input_update = datetime.now().strftime(
                "%m/%d/%Y, %H:%M:%S")

        if pvname in self._output_pvs:
            self.last_output_update = datetime.now().strftime(
                "%m/%d/%Y, %H:%M:%S")

    def _ca_connection_callback(self, *, pvname, conn, pv):
        """Callback used for monitoring connection and setting values to None on
        disconnect.
        """
        # if disconnected, set value to None
        pvname = pvname.replace(f"{self._prefix}:", "")
        if not conn:
            self._pv_registry[pvname]["value"] = None

    def _pva_value_callback(self, pvname, value):
        """Callback executed by pvAccess monitor.

        Args:
            pvname (str): Process variable name

            value (Union[np.ndarray, float]): Value to assign to process variable.
        """
        if isinstance(value, Disconnected):
            self._pv_registry[pvname]["value"] = None
        else:
            self._pv_registry[pvname]["value"] = value

        if pvname in self._input_pvs:
            self.last_input_update = datetime.now().strftime(
                "%m/%d/%Y, %H:%M:%S")

        if pvname in self._output_pvs:
            self.last_output_update = datetime.now().strftime(
                "%m/%d/%Y, %H:%M:%S")

    def _set_up_pv_monitor(self, pvname):
        """Set up process variable monitor. No-op if already registered.

        Args:
            pvname (str): Process variable name
        """
        if pvname in self._pv_registry:
            return

        if self._protocol == "ca":
            # add to registry (must exist for connection callback)
            self._pv_registry[pvname] = {"pv": None, "value": None}

            # create the pv
            pv_obj = PV(
                f"{self._prefix}:{pvname}",
                callback=self._ca_value_callback,
                connection_callback=self._ca_connection_callback,
            )

            # update registry
            self._pv_registry[pvname]["pv"] = pv_obj

        elif self._protocol == "pva":
            cb = partial(self._pva_value_callback, pvname)

            # populate registry s.t. initially disconnected will populate
            self._pv_registry[pvname] = {"pv": None, "value": None}

            # create the monitor obj
            mon_obj = self._context.monitor(f"{self._prefix}:{pvname}",
                                            cb,
                                            notify_disconnect=True)

            # update registry with the monitor
            self._pv_registry[pvname]["pv"] = mon_obj

    def get(self, pvname: str) -> np.ndarray:
        """
        Accesses and returns the value of a process variable. May be None if
        the variable is disconnected or not yet registered.

        Args:
            pvname (str): Process variable name
        """
        self._set_up_pv_monitor(pvname)
        pv = self._pv_registry.get(pvname, None)
        if pv:
            return pv["value"]
        return None

    def get_value(self, pvname):
        """Gets scalar value of a process variable, falling back to
        DEFAULT_SCALAR_VALUE when no value is available.

        Args:
            pvname (str): Process variable name.
        """
        value = self.get(pvname)

        if value is None:
            value = DEFAULT_SCALAR_VALUE

        return value

    def get_image(self, pvname) -> dict:
        """Gets image data via controller protocol. Returns a dict of
        image/x/y/dw/dh lists, or DEFAULT_IMAGE_DATA when unavailable.

        Args:
            pvname (str): Image process variable name
        """
        image = None
        if self._protocol == "ca":
            image_flat = self.get(f"{pvname}:ArrayData_RBV")
            nx = self.get(f"{pvname}:ArraySizeX_RBV")
            ny = self.get(f"{pvname}:ArraySizeY_RBV")
            x = self.get(f"{pvname}:MinX_RBV")
            y = self.get(f"{pvname}:MinY_RBV")
            x_max = self.get(f"{pvname}:MaxX_RBV")
            y_max = self.get(f"{pvname}:MaxY_RBV")

            if all([
                    image_def is not None
                    for image_def in [image_flat, nx, ny, x, y, x_max, y_max]
            ]):
                dw = x_max - x
                dh = y_max - y

                # NOTE(review): reshape(nx, ny) assumes the flat array is
                # stored x-major; confirm against the IOC's array ordering.
                image = image_flat.reshape(int(nx), int(ny))

        elif self._protocol == "pva":
            # context returns numpy array with WRITEABLE=False
            # copy to manipulate array below
            image = self.get(pvname)

            if image is not None:
                attrib = image.attrib
                x = attrib["x_min"]
                y = attrib["y_min"]
                dw = attrib["x_max"] - attrib["x_min"]
                dh = attrib["y_max"] - attrib["y_min"]
                image = copy.copy(image)

        if image is not None:
            return {
                "image": [image],
                "x": [x],
                "y": [y],
                "dw": [dw],
                "dh": [dh],
            }

        else:
            return DEFAULT_IMAGE_DATA

    def get_array(self, pvname) -> dict:
        """Gets array data via controller protocol. Returns an empty numpy
        array when unavailable.

        Args:
            pvname (str): Array process variable name
        """
        array = None

        if self._protocol == "ca":
            array_flat = self.get(f"{pvname}:ArrayData_RBV")
            shape = self.get(f"{pvname}:ArraySize_RBV")

            if all(
                [array_def is not None for array_def in [array_flat, shape]]):
                array = np.array(array_flat).reshape(shape)

        elif self._protocol == "pva":
            # context returns numpy array with WRITEABLE=False
            # copy to manipulate array below
            array = self.get(pvname)

        if array is not None:
            return array

        else:
            return np.array([])

    def put(self, pvname, value: float, timeout=1.0) -> None:
        """Assign the value of a scalar process variable.

        Args:
            pvname (str): Name of the process variable

            value (float): Value to assing to process variable.

            timeout (float): Operation timeout in seconds
        """
        self._set_up_pv_monitor(pvname)

        # allow no puts before a value has been collected
        registered = self.get(pvname)

        # if the value is registered
        if registered is not None:
            if self._protocol == "ca":
                self._pv_registry[pvname]["pv"].put(value, timeout=timeout)

            elif self._protocol == "pva":
                self._context.put(f"{self._prefix}:{pvname}",
                                  value,
                                  throw=False,
                                  timeout=timeout)

        else:
            logger.debug(f"No initial value set for {pvname}.")

    def put_image(
        self,
        pvname,
        image_array: np.ndarray = None,
        x_min: float = None,
        x_max: float = None,
        y_min: float = None,
        y_max: float = None,
        timeout: float = 1.0,
    ) -> None:
        """Assign the value of a image process variable. Allows updates to
        individual attributes.

        Args:
            pvname (str): Name of the process variable

            image_array (np.ndarray): Value to assing to process variable.

            x_min (float): Minimum x value

            x_max (float): Maximum x value

            y_min (float): Minimum y value

            y_max (float): Maximum y value

            timeout (float): Operation timeout in seconds
        """
        self._set_up_pv_monitor(pvname)

        # allow no puts before a value has been collected
        registered = self.get_image(pvname)

        # if the value is registered
        if registered is not None:
            if self._protocol == "ca":
                if image_array is not None:
                    self._pv_registry[f"{pvname}:ArrayData_RBV"]["pv"].put(
                        image_array.flatten(), timeout=timeout)

                # FIX: compare against None so legitimate 0.0 bounds are
                # not silently dropped (0.0 is falsy)
                if x_min is not None:
                    self._pv_registry[f"{pvname}:MinX_RBV"]["pv"].put(
                        x_min, timeout=timeout)

                if x_max is not None:
                    self._pv_registry[f"{pvname}:MaxX_RBV"]["pv"].put(
                        x_max, timeout=timeout)

                if y_min is not None:
                    self._pv_registry[f"{pvname}:MinY_RBV"]["pv"].put(
                        y_min, timeout=timeout)

                if y_max is not None:
                    self._pv_registry[f"{pvname}:MaxY_RBV"]["pv"].put(
                        y_max, timeout=timeout)

            elif self._protocol == "pva":
                # compose normative type
                pv = self._pv_registry[pvname]
                pv_array = pv["value"]

                # FIX: "if image_array:" raises ValueError for ndarrays
                # with more than one element (ambiguous truth value)
                if image_array is not None:
                    image_array.attrib = pv_array.attrib

                else:
                    image_array = pv_array

                if x_min is not None:
                    image_array.attrib.x_min = x_min

                if x_max is not None:
                    image_array.attrib.x_max = x_max

                if y_min is not None:
                    image_array.attrib.y_min = y_min

                if y_max is not None:
                    image_array.attrib.y_max = y_max

                self._context.put(pvname,
                                  image_array,
                                  throw=False,
                                  timeout=timeout)

        else:
            logger.debug(f"No initial value set for {pvname}.")

    def put_array(
        self,
        pvname,
        array: np.ndarray = None,
        timeout: float = 1.0,
    ) -> None:
        """Assign the value of an array process variable. Allows updates to
        individual attributes.

        Args:
            pvname (str): Name of the process variable

            array (np.ndarray): Value to assing to process variable.

            timeout (float): Operation timeout in seconds
        """
        self._set_up_pv_monitor(pvname)

        # allow no puts before a value has been collected
        registered = self.get_array(pvname)

        # if the value is registered
        if registered is not None:
            if self._protocol == "ca":
                if array is not None:
                    self._pv_registry[f"{pvname}:ArrayData_RBV"]["pv"].put(
                        array.flatten(), timeout=timeout)

            elif self._protocol == "pva":
                # compose normative type
                pv = self._pv_registry[pvname]
                # NOTE(review): the passed `array` argument is discarded
                # here and the registry value is re-put instead — looks
                # suspicious, but preserved as-is; confirm intent.
                array = pv["value"]

                self._context.put(pvname, array, throw=False, timeout=timeout)

        else:
            logger.debug(f"No initial value set for {pvname}.")

    def close(self):
        # only pvAccess holds a context that needs explicit cleanup
        if self._protocol == "pva":
            self._context.close()