def hsd_unconfig(prefix):
    global epics_prefix
    epics_prefix = prefix

    ctxt = Context('pva')
    values = ctxt.get(epics_prefix + ':CONFIG')
    values['enable'] = 0
    print(values)
    print(epics_prefix)
    ctxt.put(epics_prefix + ':CONFIG', values, wait=True)

    # This handshake seems to be necessary, or at least the .get()
    complete = False
    for i in range(100):
        complete = ctxt.get(epics_prefix + ':READY') != 0
        if complete:
            break
        print('hsd_unconfig wait for complete', i)
        time.sleep(0.1)
    if complete:
        print('hsd unconfig complete')
    else:
        raise Exception('timed out waiting for hsd_unconfig')

    ctxt.close()

    return None
class CustomCollector():
    def __init__(self, hutch):
        self.pvactx = Context('pva')
        self._hutch = hutch
        self._glob = {}
        self._pvs = {}

    def registerGlobalPV(self, name, pv):
        self._glob[name] = pv

    def registerPV(self, name, pv):
        pvs = []
        for i in range(8):
            pvs.append(pv % i)
        self._pvs[name] = pvs

    def collect(self):
        for name, pvs in self._glob.items():
            g = GaugeMetricFamily(name, documentation='', labels=['instrument'])
            values = self.pvactx.get(pvs)
            logging.debug('collect %s: %s' % (pvs, str(values)))
            g.add_metric([self._hutch], values.raw.value)
            yield g
        for name, pvs in self._pvs.items():
            g = GaugeMetricFamily(name, documentation='', labels=['instrument', 'partition'])
            values = self.pvactx.get(pvs)
            logging.debug('collect %s: %s' % (pvs, str(values)))
            for i in range(8):
                g.add_metric([self._hutch, str(i)], values[i].raw.value)
            yield g
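# A minimal usage sketch (not part of the original snippet): the collector above follows
# the prometheus_client custom-collector pattern, so it can be registered with the default
# REGISTRY and scraped over HTTP.  The hutch name 'tmo', the PV names, and port 9200 are
# illustrative assumptions only.
if __name__ == '__main__':
    import time
    from prometheus_client import start_http_server
    from prometheus_client.core import REGISTRY

    c = CustomCollector('tmo')                                    # label every metric with the hutch
    c.registerGlobalPV('drp_run_number', 'DAQ:TMO:RUNNUM')        # hypothetical single PV
    c.registerPV('drp_dead_frac', 'DAQ:TMO:PART:%d:DeadFrac')     # expanded for partitions 0-7
    REGISTRY.register(c)                                          # collect() runs on every scrape
    start_http_server(9200)
    while True:
        time.sleep(5)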
def wave8_config(prefix, connect_str, cfgtype, detname, detsegm, grp):
    global ctxt
    global lane
    global group
    group = grp

    cfg = get_config(connect_str, cfgtype, detname, detsegm)
    ocfg = cfg

    ctxt = Context('pva')

    epics_prefix = prefix + ':Top:'

    user_to_expert(ctxt, epics_prefix, cfg, full=True)

    # Assert clears
    names_clr = [epics_prefix + 'BatcherEventBuilder:Blowoff',
                 epics_prefix + 'TimingFrameRx:RxCountReset',
                 epics_prefix + 'RawBuffers:CntRst',
                 epics_prefix + 'Integrators:CntRst']
    values = [1] * len(names_clr)
    print('names {:}'.format(names_clr))
    ctxt.put(names_clr, values)

    config_expert(ctxt, epics_prefix, cfg['expert'])

    ctxt.put(epics_prefix + 'TriggerEventManager:TriggerEventBuffer[%d]:MasterEnable' % lane, 1, wait=True)

    time.sleep(0.2)

    # Deassert clears
    values = [0] * len(names_clr)
    print('names {:}'.format(names_clr))
    ctxt.put(names_clr, values)
    ctxt.put(epics_prefix + 'BatcherEventBuilder:Blowoff', 0, wait=True)

    cfg['firmwareVersion'] = ctxt.get(epics_prefix + 'AxiVersion:FpgaVersion').raw.value
    cfg['firmwareBuild'] = ctxt.get(epics_prefix + 'AxiVersion:BuildStamp').raw.value

    ctxt.close()

    v = json.dumps(cfg)
    return v
def test_constant_variable_pva(value, prefix, server, model):
    ctxt = Context("pva", conf=PVA_CONFIG, maxsize=2)

    # check constant variable assignment
    for _, variable in model.input_variables.items():
        pvname = f"{prefix}:{variable.name}"
        if variable.variable_type == "scalar":
            count = 3
            successful_put = False
            while count > 0 and not successful_put:
                try:
                    ctxt.put(pvname, value)
                    successful_put = True
                except:
                    ctxt.close()
                    del ctxt
                    time.sleep(3)
                    ctxt = Context("pva", conf=PVA_CONFIG)
                count -= 1
            if count == 0:
                raise Exception("Failed puts.")

    for _, variable in model.input_variables.items():
        if variable.variable_type == "scalar":
            pvname = f"{prefix}:{variable.name}"
            count = 3
            successful_get = False
            val = None
            while count > 0 and not successful_get:
                try:
                    val = ctxt.get(pvname)
                    successful_get = True
                except:
                    ctxt.close()
                    del ctxt
                    time.sleep(5)
                    ctxt = Context("pva", conf=PVA_CONFIG)
                    time.sleep(1)
                count -= 1
            if count == 0:
                raise Exception("Failed gets.")

            if variable.is_constant:
                assert val != value
            else:
                assert val == value

    ctxt.close()
def hps_connect(root):
    ctxt = Context('pva')

    d = {}
    d['addr'] = ctxt.get(bldName + ':ADDR')
    d['port'] = ctxt.get(bldName + ':PORT')
    print('hps_connect {:}'.format(d))

    ctxt.close()

    return d
def hsd_connect(epics_prefix):
    # Retrieve connection information from EPICS
    ctxt = Context('pva')
    values = ctxt.get(epics_prefix + ':PADDR_U')
    print(values)
    ctxt.close()

    d = {}
    d['paddr'] = values
    return json.dumps(d)
class CustomCollector():
    def __init__(self):
        self.pvactx = Context('pva')

    def collect(self):
        g = GaugeMetricFamily('drp_dead_frac', documentation='', labels=['partition'])
        for p in range(8):
            value = self.pvactx.get('DAQ:LAB2:XPM:2:PART:%d:DeadFrac' % p)
            print('collect', value.raw.value)
            g.add_metric([str(p)], value.raw.value)
        yield g
def hsd_connect(epics_prefix):
    # Retrieve connection information from EPICS
    # May need to wait for other processes here {PVA Server, hsdioc}, so poll
    ctxt = Context('pva')
    for i in range(50):
        values = ctxt.get(epics_prefix + ':PADDR_U')
        if values != 0:
            break
        print('{:} is zero, retry'.format(epics_prefix + ':PADDR_U'))
        time.sleep(0.1)
    ctxt.close()

    d = {}
    d['paddr'] = values
    return json.dumps(d)
def wave8_connect(epics_prefix):
    # Retrieve connection information from EPICS
    # May need to wait for other processes here, so poll
    ctxt = Context('pva')
    for i in range(50):
        values = ctxt.get(epics_prefix + ':Top:TriggerEventManager:XpmMessageAligner:RxId').raw.value
        if values != 0:
            break
        print('{:} is zero, retry'.format(epics_prefix + ':Top:TriggerEventManager:XpmMessageAligner:RxId'))
        time.sleep(0.1)
    ctxt.close()

    d = {}
    d['paddr'] = values
    return json.dumps(d)
def capvget(pvname, as_string=False, count=None, as_numpy=True, timeout=5.0, use_monitor=False):
    global ctxt
    #return epics.caget(pvname, as_string=as_string, count=count, as_numpy=as_numpy, timeout=timeout, use_monitor=use_monitor)
    #pvaccess.Channel(PVNAME, pvaccess.CA).get().getPyObject()
    #channel = pvaccess.Channel(pvname, pvaccess.CA)
    #return channel.get().getPyObject()
    if pvname.startswith('pva://'):
        if ctxt is None:
            from p4p.client.thread import Context
            ctxt = Context('pva')
        return ctxt.get(pvname[6:])
    else:
        return epics.caget(pvname)
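# Illustrative calls (PV names are assumptions, not from the original snippet): a name
# prefixed with 'pva://' is routed through the shared p4p Context; anything else falls
# back to epics.caget over Channel Access.
#   val_pva = capvget('pva://DAQ:LAB2:XPM:2:PART:0:DeadFrac')   # PVAccess get via p4p
#   val_ca  = capvget('SOME:CA:PV')                             # Channel Access get via pyepics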
class CustomCollector():
    def __init__(self):
        self.pvactx = Context('pva')
        self._name = None
        self._pv = None

    def registerPV(self, name, pv):
        self._name = name
        self._pv = pv

    def collect(self):
        if self._pv is None:
            return
        g = GaugeMetricFamily(self._name, documentation='', labels=['partition'])
        for p in range(8):
            pv = self._pv % p
            value = self.pvactx.get(pv)
            logging.debug('collect %s: %s' % (pv, str(value.raw.value)))
            g.add_metric([str(p)], value.raw.value)
        yield g
def apply_config(cfg):
    global pv_prefix

    rcfg = {}
    rcfg = cfg.copy()
    rcfg['user'] = {}
    rcfg['expert'] = {}

    linacMode = cfg['user']['LINAC']
    rcfg['user']['LINAC'] = linacMode
    rcfg['user']['Cu' if linacMode == 0 else 'SC'] = {}

    pvdict = {}  # dictionary of epics pv name : value
    for group in readout_groups:
        if linacMode == 0:   # Cu
            grp_prefix = 'group' + str(group) + '_eventcode'
            eventcode = cfg['user']['Cu'][grp_prefix]
            rcfg['user']['Cu'][grp_prefix] = eventcode
            pvdict[str(group) + ':L0Select'] = 2  # eventCode
            pvdict[str(group) + ':L0Select_EventCode'] = eventcode
            pvdict[str(group) + ':DstSelect'] = 1  # DontCare
        else:                # SC
            grp_prefix = 'group' + str(group)
            grp = cfg['user']['SC'][grp_prefix]
            rcfg['user']['SC'][grp_prefix] = grp
            pvdict[str(group) + ':L0Select'] = grp['trigMode']
            pvdict[str(group) + ':L0Select_FixedRate'] = grp['fixed']['rate']
            pvdict[str(group) + ':L0Select_ACRate'] = grp['ac']['rate']
            pvdict[str(group) + ':L0Select_EventCode'] = 0  # not an option
            pvdict[str(group) + ':L0Select_Sequence'] = grp['seq']['mode']
            pvdict[str(group) + ':DstSelect'] = grp['destination']['select']

            # convert ac.ts0 through ac.ts5 to L0Select_ACTimeslot bitmask
            tsmask = 0
            for tsnum in range(6):
                tsval = grp['ac']['ts' + str(tsnum)]
                tsmask |= 1 << tsval
            pvdict[str(group) + ':L0Select_ACTimeslot'] = tsmask

            # L0Select_SeqBit is one var used by all of seq.(burst/fixed/local)
            if grp['seq']['mode'] == 15:    # burst
                seqbit = grp['seq']['burst']['mode']
            elif grp['seq']['mode'] == 16:  # fixed rate
                seqbit = grp['seq']['fixed']['rate']
            elif grp['seq']['mode'] == 17:  # local
                seqbit = grp['seq']['local']['rate']
            else:
                raise ValueError('Illegal value for trigger sequence mode')
            pvdict[str(group) + ':L0Select_SeqBit'] = seqbit

            # DstSelect_Mask should come from destination.dest0 through dest15
            dstmask = 0
            for dstnum in range(16):
                dstval = grp['destination']['dest' + str(dstnum)]
                if dstval:
                    dstmask |= 1 << dstnum
            pvdict[str(group) + ':DstSelect_Mask'] = dstmask

        grp_prefix = 'group' + str(group)
        grp = cfg['expert'][grp_prefix]
        rcfg['expert'][grp_prefix] = grp
        # 4 InhEnable/InhInterval/InhLimit
        for inhnum in range(4):
            pvdict[str(group) + ':InhInterval' + str(inhnum)] = grp['inhibit' + str(inhnum)]['interval']
            pvdict[str(group) + ':InhLimit' + str(inhnum)] = grp['inhibit' + str(inhnum)]['limit']
            pvdict[str(group) + ':InhEnable' + str(inhnum)] = grp['inhibit' + str(inhnum)]['enable']

    names = list(pvdict.keys())
    values = list(pvdict.values())
    names = [pv_prefix + 'PART:' + n for n in names]

    # program the values
    ctxt = Context('pva')
    ctxt.put(names, values)

    # Capture firmware version for persistence in xtc
    #rcfg['firmwareVersion'] = ctxt.get(pv_prefix+'FwVersion').raw.value
    rcfg['firmwareBuild'] = ctxt.get(pv_prefix + 'FwBuild').raw.value
    ctxt.close()

    return json.dumps(rcfg)
class AxisCom:
    def __init__(self, url_string, axisNum=1, log_debug=True):
        self.pvpfx = None  # PV prefix, like IOC:m1
        self.ctxt = None   # P4P context, if any
        self.log_debug = log_debug
        if url_string.startswith("pva://"):
            self.url_scheme = "pva://"
            self.pvpfx = url_string[6:]
            from p4p.client.thread import Context
            self.ctxt = Context("pva")
        elif url_string.startswith("ca://"):
            # Channel access
            self.url_scheme = "ca://"
            self.pvpfx = url_string[5:]
            import epics as epics
            self.epics = epics
        else:
            help_and_exit(self, url_string, "invalid scheme")

    def get(
        self,
        pvsuf,
        as_string=False,
        count=None,
        as_numpy=True,
        timeout=25.0,
        use_monitor=False,
    ):
        pvname = self.pvpfx + pvsuf
        fullname = self.url_scheme + pvname
        ret = None
        if as_string == True:
            raise Exception("as_string=True not supported")
        if self.log_debug:
            print(
                f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S} {filnam} get {fullname}"
            )
        if self.ctxt is not None:
            ret = self.ctxt.get(pvname, timeout=timeout)
        else:
            ret = self.epics.caget(pvname, timeout=timeout, use_monitor=use_monitor)
        if self.log_debug:
            print(
                f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S} {filnam} get {fullname} ret={ret}"
            )
        if ret is None:
            raise Exception("get None")
        return ret

    def put(
        self,
        pvsuf,
        value,
        wait=False,
        timeout=5.0,
    ):
        pvname = self.pvpfx + pvsuf
        fullname = self.url_scheme + pvname
        ret = None
        if self.log_debug:
            if wait:
                print(
                    f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S} {filnam} put {fullname} timeout={timeout} wait={wait} value={value}"
                )
            else:
                print(
                    f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S} {filnam} put {fullname} value={value}"
                )
        if self.ctxt is not None:
            ret = self.ctxt.put(pvname, value, timeout=timeout, wait=wait)
            if self.log_debug:
                print(
                    f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S} {filnam} put {fullname} value={value} pvput_ret={ret}"
                )
        else:
            caput_ret = self.epics.caput(pvname, value, timeout=timeout, wait=wait)
            # This function returns 1 on success,
            # and a negative number if the timeout has been exceeded
            if self.log_debug:
                print(
                    f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S} {filnam} put {fullname} value={value} caput_ret={caput_ret}"
                )
            if caput_ret != 1:
                raise Exception(
                    f"caput({pvname},{value}) returned error {caput_ret}")

    def putDbgStrToLOG(self, value, wait=True, timeout=5.0):
        pvsuf = "-DbgStrToLOG"
        try:
            self.put(pvsuf, value, wait=wait, timeout=timeout)
        except Exception as ex:
            print(
                f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S} {filnam} put {pvsuf} value={value} ex={ex}"
            )

    def getMotorPvName(self):
        return self.pvpfx
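# Hypothetical usage sketch for AxisCom (the PV prefix "IOC:m1" and the motor-record
# field suffixes .RBV/.VAL are assumptions, not part of the original snippet):
#
#   axis = AxisCom("pva://IOC:m1", axisNum=1, log_debug=True)   # talk PVAccess via p4p
#   readback = axis.get(".RBV")                                 # read the position readback
#   axis.put(".VAL", 1.5, wait=True, timeout=10.0)              # write a setpoint and wait
#   axis.putDbgStrToLOG("test move")                            # best-effort log message
#
# The same calls work with a "ca://" prefix, in which case pyepics caget/caput are used.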
def wave8_config(prefix, connect_str, cfgtype, detname, detsegm, group):
    global ctxt

    cfg = get_config(connect_str, cfgtype, detname, detsegm)

    ctxt = Context('pva')

    #                 | timing fiducial
    #  PartitionDelay | TriggerEventManager.TriggerEventBuffer receives xpm trigger
    #  TriggerDelay   | TriggerEventManager.triggerBus asserts trigger
    #  IntStart       | Integrators.intStart (baseline latched)
    #  IntLen         | Integrators.intEnd
    #                 | RawDataBuffer start
    #  RawBuffLen     | RawDataBuffer End

    epics_prefix = prefix + ':Top:'
    partitionDelay = ctxt.get(epics_prefix + 'TriggerEventManager:XpmMessageAligner:PartitionDelay[%d]' % group)
    raw = cfg['user']['raw']
    rawStart = raw['start_ns']
    triggerDelay = rawStart * 1300 / 7000 - partitionDelay * 200
    print('partitionDelay {:} rawStart {:} triggerDelay {:}'.format(partitionDelay, rawStart, triggerDelay))
    if triggerDelay < 0:
        raise ValueError('triggerDelay computes to < 0')

    rawNsamples = int(raw['gate_ns'] * 0.25)
    if rawNsamples > 256:
        raise ValueError('raw.gate_ns > 1020')
    raw['nsamples'] = rawNsamples

    fex = cfg['user']['fex']
    intStart = fex['start_ns']
    if intStart < rawStart:
        print('fex.start_ns {:} raw.start_ns {:}'.format(intStart, rawStart))
        raise ValueError('fex.start_ns < raw.start_ns')
    fexTrigDelay = int((intStart - rawStart) * 250 / 1000)
    if fexTrigDelay > 255:
        raise ValueError('fex.start_ns > raw.start_ns + 1020')

    fexNsamples = int(fex['gate_ns'] * 0.25)
    if fexNsamples > 255:
        raise ValueError('fex.gate_ns > 1020')
    fex['nsamples'] = rawNsamples

    # Assert clears
    names_clr = [epics_prefix + 'BatcherEventBuilder:Blowoff',
                 epics_prefix + 'TimingFrameRx:RxCountReset',
                 epics_prefix + 'RawBuffers:CntRst',
                 epics_prefix + 'Integrators:CntRst']
    values = [1] * len(names_clr)
    print('names {:}'.format(names_clr))
    ctxt.put(names_clr, values)

    expert = cfg['expert']['Top']
    expert['TriggerEventManager']['TriggerEventBuffer[0]']['TriggerDelay'] = triggerDelay
    for i in range(8):
        expert['RawBuffers']['BuffEn[%d]' % i] = raw['enable[%d]' % i]
    # Firmware needs a value one less
    expert['RawBuffers']['BuffLen'] = rawNsamples - 1
    # Firmware needs a value one less
    prescale = raw['prescale']
    if prescale > 0:
        prescale -= 1
    expert['RawBuffers']['TrigPrescale'] = prescale

    expert['Integrators']['TrigDelay'] = fexTrigDelay
    # Firmware needs a value one less
    expert['Integrators']['IntegralSize'] = fexNsamples - 1
    expert['Integrators']['BaselineSize'] = fex['baseline']

    for i in range(4):
        expert['Integrators']['CorrCoefficientFloat64[%d]' % i] = fex['coeff[%d]' % i]

    expert['TriggerEventManager']['TriggerEventBuffer[0]']['Partition'] = group

    names = []
    values = []
    epics_put(cfg['expert'], prefix + ':', names, values)

    ctxt.put(names, values)

    ctxt.put(epics_prefix + 'TriggerEventManager:TriggerEventBuffer[0]:MasterEnable', 1, wait=True)

    time.sleep(0.2)

    # Deassert clears
    values = [0] * len(names_clr)
    ctxt.put(names_clr, values)
    ctxt.put(epics_prefix + 'BatcherEventBuilder:Blowoff', 0, wait=True)

    cfg['firmwareVersion'] = ctxt.get(epics_prefix + 'AxiVersion:FpgaVersion').raw.value
    cfg['firmwareBuild'] = ctxt.get(epics_prefix + 'AxiVersion:BuildStamp').raw.value

    ctxt.close()

    v = json.dumps(cfg)
    return v
class Controller:
    """
    Controller class used to get and put process variables.

    Attributes
    ----------
    protocol: str
        Protocol to use ("pva", "ca")

    context: p4p.client.thread.Context
        p4p threaded context instance
    """

    def __init__(self, protocol: str):
        """
        Store protocol and initialize context if using PVAccess.
        """
        self.protocol = protocol

        # initialize context for pva
        self.context = None
        if protocol == "pva":
            self.context = Context("pva")

    def get(self, pvname: str):
        """
        Get the value of a process variable.

        Parameters
        ----------
        pvname: str
            Name of the process variable

        Returns
        -------
        np.ndarray
            Returns numpy array containing value.
        """
        if self.protocol == "ca":
            return caget(pvname)
        elif self.protocol == "pva":
            return self.context.get(pvname)

    def get_image(self, pvname):
        """
        Gets image data based on protocol.

        Arguments
        ---------
        pvname: str
            Name of process variable

        Returns
        -------
        dict
            Formatted image data of the form
            ```
            {
                "image": [np.ndarray],
                "x": [float],
                "y": [float],
                "dw": [float],
                "dh": [float],
            }
            ```
        """
        if self.protocol == "ca":
            pvname = pvname.replace(":ArrayData_RBV", "")
            nx = self.get(f"{pvname}:ArraySizeX_RBV")
            ny = self.get(f"{pvname}:ArraySizeY_RBV")
            dw = self.get(f"{pvname}:dw")
            dh = self.get(f"{pvname}:dh")
            image = self.get(f"{pvname}:ArrayData_RBV")
            image = image.reshape(int(nx), int(ny))

        elif self.protocol == "pva":
            # context returns np array with WRITEABLE=False
            # copy to manipulate array below
            output = self.get(pvname)
            attrib = output.attrib
            dw = attrib["dw"]
            dh = attrib["dh"]
            nx, ny = output.shape
            image = copy.copy(output)

        return {
            "image": [image],
            "x": [-dw / 2],
            "y": [-dh / 2],
            "dw": [dw],
            "dh": [dh],
        }

    def put(self, pvname, value: Union[np.ndarray, float]) -> None:
        """
        Assign the value of a process variable.

        Parameters
        ----------
        pvname: str
            Name of the process variable

        value
            Value to put. Either float or numpy array
        """
        if self.protocol == "ca":
            caput(pvname, value)

        elif self.protocol == "pva":
            self.context.put(pvname, value)
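# A short usage sketch for the Controller wrapper above; the PV names are placeholders,
# not part of the original code.
if __name__ == "__main__":
    ctrl = Controller("pva")                     # or Controller("ca") for Channel Access
    value = ctrl.get("LUME:TEST:SCALAR")         # hypothetical scalar PV
    ctrl.put("LUME:TEST:SCALAR", value + 1.0)    # write back through the same protocol
    img = ctrl.get_image("LUME:TEST:IMAGE")      # dict with image/x/y/dw/dh lists
    print(img["dw"], img["dh"])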
def hsd_config(connect_str, epics_prefix, cfgtype, detname, detsegm, group):

    cfg = get_config(connect_str, cfgtype, detname, detsegm)

    # fetch the current configuration for defaults not specified in the configuration
    ctxt = Context('pva')
    values = ctxt.get(epics_prefix + ':CONFIG')

    # fetch the xpm delay
    partitionDelay = ctxt.get(epics_prefix + ':MONTIMING').msgdelayset
    print('partitionDelay {:}'.format(partitionDelay))

    #
    #  Validate user raw values
    #
    raw = cfg['user']['raw']
    raw_start = (raw['start_ns'] * 1300 / 7000 - partitionDelay * 200) * 160 / 200  # in "160MHz"(*13/14) clks
    if raw_start < 0:
        print('partitionDelay {:} raw_start_ns {:} raw_start {:}'.format(
            partitionDelay, raw['start_ns'], raw_start))
        raise ValueError('raw_start computes to < 0')

    raw_gate = int(raw['gate_ns'] * 0.160 * 13 / 14)  # in "160" MHz clks
    raw_nsamples = raw_gate * 40
    if raw_gate < 0:
        raise ValueError('raw_gate computes to < 0')
    if raw_gate > 4000:
        raise ValueError('raw_gate computes to > 4000; raw_nsamples > 160000')

    #
    #  Validate user fex values
    #
    fex = cfg['user']['fex']
    fex_start = int((fex['start_ns'] * 1300 / 7000 - partitionDelay * 200) * 160 / 200)  # in "160MHz"(*13/14) clks
    if fex_start < 0:
        print('partitionDelay {:} fex_start_ns {:} fex_start {:}'.format(
            partitionDelay, fex['start_ns'], fex_start))
        raise ValueError('fex_start computes to < 0')

    fex_gate = int(fex['gate_ns'] * 0.160 * 13 / 14)  # in "160" MHz clks
    fex_nsamples = fex_gate * 40
    if fex_gate < 0:
        raise ValueError('fex_gate computes to < 0')
    # Place no constraint on upper bound.  Assumes sparsification will reduce to < 160000 recorded samples

    # hsd_thr_ilv_native_fine firmware expects xpre,xpost in # of super samples (4 samples)
    fex_xpre = int((fex['xpre'] + 3) / 4)
    fex_xpost = int((fex['xpost'] + 3) / 4)

    # overwrite expert fields from user input
    expert = cfg['expert']
    expert['readoutGroup'] = group
    expert['enable'] = 1
    expert['raw_start'] = raw_start
    expert['raw_gate'] = raw_gate
    expert['raw_prescale'] = raw['prescale']
    expert['fex_start'] = fex_start
    expert['fex_gate'] = fex_gate
    expert['fex_xpre'] = fex_xpre
    expert['fex_xpost'] = fex_xpost
    expert['fex_prescale'] = fex['prescale']

    # program the values
    print(epics_prefix)
    ctxt.put(epics_prefix + ':READY', 0, wait=True)
    ctxt.put(epics_prefix + ':CONFIG', expert, wait=True)

    # the completion of the "put" guarantees that all of the above
    # have completed (although in no particular order)
    complete = False
    for i in range(100):
        complete = ctxt.get(epics_prefix + ':READY') != 0
        if complete:
            break
        print('hsd config wait for complete', i)
        time.sleep(0.1)
    if complete:
        print('hsd config complete')
    else:
        raise Exception('timed out waiting for hsd configure')

    cfg['firmwareVersion'] = ctxt.get(epics_prefix + ':FWVERSION').raw.value
    cfg['firmwareBuild'] = ctxt.get(epics_prefix + ':FWBUILD').raw.value

    ctxt.close()

    return json.dumps(cfg)
def ts_config(connect_json, cfgtype, detname, detsegm):
    cfg = get_config(connect_json, cfgtype, detname, detsegm)

    connect_info = json.loads(connect_json)

    # get the list of readout groups that the user has selected
    # so we only configure those
    readout_groups = []
    for nodes in connect_info['body']['drp'].values():
        readout_groups.append(nodes['det_info']['readout'])
    readout_groups = set(readout_groups)

    control_info = connect_info['body']['control']['0']['control_info']
    xpm_master = control_info['xpm_master']
    pv_prefix = control_info['pv_base'] + ':XPM:' + str(xpm_master) + ':PART:'

    rcfg = cfg.copy()
    rcfg['user'] = {}
    rcfg['expert'] = {}

    linacMode = cfg['user']['LINAC']
    rcfg['user']['LINAC'] = linacMode
    rcfg['user']['Cu' if linacMode == 0 else 'SC'] = {}

    pvdict = {}  # dictionary of epics pv name : value
    for group in readout_groups:
        if linacMode == 0:   # Cu
            grp_prefix = 'group' + str(group) + '_eventcode'
            eventcode = cfg['user']['Cu'][grp_prefix]
            rcfg['user']['Cu'][grp_prefix] = eventcode
            pvdict[str(group) + ':L0Select'] = 2  # eventCode
            pvdict[str(group) + ':L0Select_EventCode'] = eventcode
            pvdict[str(group) + ':DstSelect'] = 1  # DontCare
        else:                # SC
            grp_prefix = 'group' + str(group)
            grp = cfg['user']['SC'][grp_prefix]
            rcfg['user']['SC'][grp_prefix] = grp
            pvdict[str(group) + ':L0Select'] = grp['trigMode']
            pvdict[str(group) + ':L0Select_FixedRate'] = grp['fixed']['rate']
            pvdict[str(group) + ':L0Select_ACRate'] = grp['ac']['rate']
            pvdict[str(group) + ':L0Select_EventCode'] = 0  # not an option
            pvdict[str(group) + ':L0Select_Sequence'] = grp['seq']['mode']
            pvdict[str(group) + ':DstSelect'] = grp['destination']['select']

            # convert ac.ts0 through ac.ts5 to L0Select_ACTimeslot bitmask
            tsmask = 0
            for tsnum in range(6):
                tsval = grp['ac']['ts' + str(tsnum)]
                tsmask |= 1 << tsval
            pvdict[str(group) + ':L0Select_ACTimeslot'] = tsmask

            # L0Select_SeqBit is one var used by all of seq.(burst/fixed/local)
            if grp['seq']['mode'] == 15:    # burst
                seqbit = grp['seq']['burst']['mode']
            elif grp['seq']['mode'] == 16:  # fixed rate
                seqbit = grp['seq']['fixed']['rate']
            elif grp['seq']['mode'] == 17:  # local
                seqbit = grp['seq']['local']['rate']
            else:
                raise ValueError('Illegal value for trigger sequence mode')
            pvdict[str(group) + ':L0Select_SeqBit'] = seqbit

            # DstSelect_Mask should come from destination.dest0 through dest15
            dstmask = 0
            for dstnum in range(16):
                dstval = grp['destination']['dest' + str(dstnum)]
                if dstval:
                    dstmask |= 1 << dstnum
            pvdict[str(group) + ':DstSelect_Mask'] = dstmask

        grp_prefix = 'group' + str(group)
        grp = cfg['expert'][grp_prefix]
        rcfg['expert'][grp_prefix] = grp
        # 4 InhEnable/InhInterval/InhLimit
        for inhnum in range(4):
            pvdict[str(group) + ':InhInterval' + str(inhnum)] = grp['inhibit' + str(inhnum)]['interval']
            pvdict[str(group) + ':InhLimit' + str(inhnum)] = grp['inhibit' + str(inhnum)]['limit']
            pvdict[str(group) + ':InhEnable' + str(inhnum)] = grp['inhibit' + str(inhnum)]['enable']

    names = list(pvdict.keys())
    values = list(pvdict.values())
    names = [pv_prefix + n for n in names]

    # program the values
    ctxt = Context('pva')
    ctxt.put(names, values)

    # Capture firmware version for persistence in xtc
    pv_prefix = control_info['pv_base'] + ':XPM:' + str(xpm_master) + ':'
    #rcfg['firmwareVersion'] = ctxt.get(pv_prefix+'FwVersion').raw.value
    rcfg['firmwareBuild'] = ctxt.get(pv_prefix + 'FwBuild').raw.value
    ctxt.close()

    return json.dumps(rcfg)
def hsd_config(connect_str, prefix, cfgtype, detname, detsegm, group):
    global partitionDelay
    global epics_prefix
    global ocfg
    epics_prefix = prefix

    cfg = get_config(connect_str, cfgtype, detname, detsegm)

    # fetch the current configuration for defaults not specified in the configuration
    ctxt = Context('pva')
    values = ctxt.get(epics_prefix + ':CONFIG')

    # fetch the xpm delay
    partitionDelay = ctxt.get(epics_prefix + ':MONTIMING').msgdelayset
    print('partitionDelay {:}'.format(partitionDelay))

    #
    #  Validate user raw values
    #
    raw = cfg['user']['raw']
    raw_start = int((raw['start_ns'] * 1300 / 7000 - partitionDelay * 200) * 160 / 200)  # in "160MHz"(*13/14) clks
    # raw_start register is 14 bits
    if raw_start < 0:
        print('partitionDelay {:} raw_start_ns {:} raw_start {:}'.format(
            partitionDelay, raw['start_ns'], raw_start))
        raise ValueError('raw_start is too small by {:} ns'.format(
            -raw_start / 0.16 * 14. / 13))
    if raw_start > 0x3fff:
        print('partitionDelay {:} raw_start_ns {:} raw_start {:}'.format(
            partitionDelay, raw['start_ns'], raw_start))
        raise ValueError('start_ns is too large by {:} ns'.format(
            (raw_start - 0x3fff) / 0.16 * 14. / 13))

    raw_gate = int(raw['gate_ns'] * 0.160 * 13 / 14)  # in "160" MHz clks
    raw_nsamples = raw_gate * 40
    # raw_gate register is 14 bits
    if raw_gate < 0:
        raise ValueError('raw_gate computes to < 0')
    if raw_gate > 4000:
        raise ValueError('raw_gate computes to > 4000; raw_nsamples > 160000')

    #
    #  Validate user fex values
    #
    fex = cfg['user']['fex']
    fex_start = int((fex['start_ns'] * 1300 / 7000 - partitionDelay * 200) * 160 / 200)  # in "160MHz"(*13/14) clks
    if fex_start < 0:
        print('partitionDelay {:} fex_start_ns {:} fex_start {:}'.format(
            partitionDelay, fex['start_ns'], fex_start))
        raise ValueError('fex_start computes to < 0')

    fex_gate = int(fex['gate_ns'] * 0.160 * 13 / 14)  # in "160" MHz clks
    fex_nsamples = fex_gate * 40
    if fex_gate < 0:
        raise ValueError('fex_gate computes to < 0')
    # Place no constraint on upper bound.  Assumes sparsification will reduce to < 160000 recorded samples

    # hsd_thr_ilv_native_fine firmware expects xpre,xpost in # of super samples (4 samples)
    fex_xpre = int((fex['xpre'] + 3) / 4)
    fex_xpost = int((fex['xpost'] + 3) / 4)

    # overwrite expert fields from user input
    expert = cfg['expert']
    expert['readoutGroup'] = group
    expert['enable'] = 1
    expert['raw_start'] = raw_start
    expert['raw_gate'] = raw_gate
    expert['raw_prescale'] = raw['prescale']
    expert['fex_start'] = fex_start
    expert['fex_gate'] = fex_gate
    expert['fex_xpre'] = fex_xpre
    expert['fex_xpost'] = fex_xpost
    expert['fex_ymin'] = fex['ymin']
    expert['fex_ymax'] = fex['ymax']
    expert['fex_prescale'] = fex['prescale']

    # program the values
    apply_config(ctxt, cfg)

    fwver = ctxt.get(epics_prefix + ':FWVERSION').value
    fwbld = ctxt.get(epics_prefix + ':FWBUILD').value
    cfg['firmwareVersion'] = fwver
    cfg['firmwareBuild'] = fwbld
    print(f'fwver: {fwver}')
    print(f'fwbld: {fwbld}')

    ctxt.close()

    ocfg = cfg
    return json.dumps(cfg)
class ts_connector:
    def __init__(self, json_connect_info):
        self.connect_info = json.loads(json_connect_info)
        print('*** connect_info', self.connect_info)

        control_info = self.connect_info['body']['control']['0']['control_info']
        self.xpm_base = control_info['pv_base'] + ':XPM:'
        master_xpm_num = control_info['xpm_master']
        self.master_xpm_pv = self.xpm_base + str(master_xpm_num) + ':'

        self.ctxt = Context('pva')

        self.get_xpm_info()
        self.get_readout_group_mask()

        # unfortunately, the hsd needs the Rx link reset before the Tx,
        # otherwise we get CRC errors on the link.
        # try commenting this out since Matt has made the links more reliable
        #self.xpm_link_reset('Rx')
        #self.xpm_link_reset('Tx')

        # must come after clear readout because clear readout increments
        # the event counters, and the pgp eb needs them to start from zero
        # comment this out since it was moved to control.py
        #self.l0_count_reset()

        # enables listening to deadtime
        self.xpm_link_enable()

        self.ctxt.close()

    def get_readout_group_mask(self):
        self.readout_group_mask = 0
        for _, _, readout_group in self.xpm_info:
            self.readout_group_mask |= (1 << readout_group)

    def get_xpm_info(self):
        self.xpm_info = []
        for key, node_info in self.connect_info['body']['drp'].items():
            try:
                # FIXME: should have a better method to map xpm ip
                # address to xpm number (used to create pv names)
                xpm_id = int(node_info['connect_info']['xpm_ip'].split('.')[2])
                xpm_port = node_info['connect_info']['xpm_port']
                readout_group = node_info['det_info']['readout']
                self.xpm_info.append((xpm_id, xpm_port, readout_group))
            except KeyError:
                pass

    def xpm_link_disable_all(self):
        # FIXME: need a mechanism to disable unused links in all
        # downstream XPMs.  For now, just clear out our readout
        # groups from all the XPMs we know about from the collection,
        # which comes from the "remote link id" info in the drp nodes.
        xpms = [xpm_num for xpm_num, _, _ in self.xpm_info]
        unique_xpms = set(xpms)
        pv_names = []
        for xpm_num in unique_xpms:
            for xpm_port in range(32):
                pv_names.append(self.xpm_base + str(xpm_num) + ':' + 'LinkGroupMask' + str(xpm_port))
        current_group_masks = self.ctxt.get(pv_names)
        print(current_group_masks)
        # don't clear out group_mask 0xff (an indication that it's
        # a downstream XPM link)
        #pv_names_to_clear = [pv_name for (pv_name,group_mask) in zip(pv_names,current_group_masks) if (group_mask & self.readout_group_mask) and (group_mask != 0xff)]
        #print('*** clearing xpm links',pv_names_to_clear)
        #self.ctxt.put(pv_names_to_clear,len(pv_names_to_clear)*[0])

    def xpm_link_enable(self):
        self.xpm_link_disable_all()
        pv_names = []
        values = []
        for xpm_num, xpm_port, readout_group in self.xpm_info:
            pvname = self.xpm_base + str(xpm_num) + ':' + 'LinkGroupMask' + str(xpm_port)
            pv_names.append(pvname)
            values.append(1 << readout_group)
        print('*** setting xpm link enables', pv_names, values)
        self.ctxt.put(pv_names, values)

    def xpm_link_reset(self, style):
        # make pv name that looks like DAQ:LAB2:XPM:1:RxLinkReset11
        # for xpm_num 1 and xpm_port 11
        pv_names = []
        for xpm_num, xpm_port, _ in self.xpm_info:
            pvname = self.xpm_base + str(xpm_num) + ':' + style + 'LinkReset' + str(xpm_port)
            pv_names.append(pvname)
        print('*** xpm link resetting', pv_names)
        self.ctxt.put(pv_names, len(pv_names) * [1])
        # unfortunately need to wait for the links to relock, which
        # matt says takes "an appreciable fraction of a second".
        # empirically, the links seem unreliable unless we wait 2s.
        time.sleep(2)

    def l0_count_reset(self):
        pvL0Reset = self.master_xpm_pv + 'GroupL0Reset'
        print('*** resetting l0 count', self.readout_group_mask)
        self.ctxt.put(pvL0Reset, self.readout_group_mask)
#!/usr/bin/env python

from Dynamic_Common import getAddRecordName, getDynamicRecordName
from p4p.client.thread import Context, Type, Value
import numpy as np

ctxt = Context('pva')

pvAddRecord = ctxt.get(getAddRecordName())
print('pvAddRecord=', pvAddRecord)

struct = Value(Type([
    ('name', 's'),
    ('x', 'ad'),
    ('y', 'ad'),
    ('xmin', 'd'),
    ('xmax', 'd'),
    ('ymin', 'd'),
    ('ymax', 'd'),
]))

pvAddRecord['argument']['recordName'] = getDynamicRecordName()
pvAddRecord['argument']['union'] = struct

ctxt.put(getAddRecordName(), pvAddRecord)
def hsd_config(connect_str, epics_prefix, cfgtype, detname, group):

    cfg = get_config(connect_str, cfgtype, detname)

    # this structure of epics variable names must mirror
    # the configdb.  alternatively, we could consider
    # putting these in the configdb, perhaps as readonly fields.
    pvtable = {
        'enable': 'enable',
        'raw': {
            'start': 'raw_start',
            'gate': 'raw_gate',
            'prescale': 'raw_prescale'
        },
        'fex': {
            'start': 'fex_start',
            'gate': 'fex_gate',
            'prescale': 'fex_prescale',
            'ymin': 'fex_ymin',
            'ymax': 'fex_ymax',
            'xpre': 'fex_xpre',
            'xpost': 'fex_xpost'
        },
        'expert': {
            'datamode': 'test_pattern',
            'fullthresh': 'full_event',
            'fullsize': 'full_size',
            'fsrange': 'fs_range_vpp'
        },
    }

    # look in the cfg dictionary for values that match the epics
    # variables in the pvtable
    values = {}
    epics_names_values(pvtable, cfg, values)
    values['readoutGroup'] = group

    # program the values
    ctxt = Context('pva')
    print(epics_prefix)
    ctxt.put(epics_prefix + ':READY', 0, wait=True)
    print(values)
    ctxt.put(epics_prefix + ':CONFIG', values, wait=True)

    # the completion of the "put" guarantees that all of the above
    # have completed (although in no particular order)
    complete = False
    for i in range(100):
        complete = ctxt.get(epics_prefix + ':READY') != 0
        if complete:
            break
        print('hsd config wait for complete', i)
        time.sleep(0.1)
    if complete:
        print('hsd config complete')
    else:
        raise Exception('timed out waiting for hsd configure')

    ctxt.close()

    return json.dumps(cfg)
class ts_connector:
    def __init__(self, json_connect_info):
        self.connect_info = json.loads(json_connect_info)
        print('*** connect_info')
        pp = pprint.PrettyPrinter()
        pp.pprint(self.connect_info)

        control_info = self.connect_info['body']['control']['0']['control_info']
        self.xpm_base = control_info['pv_base'] + ':XPM:'
        master_xpm_num = control_info['xpm_master']
        self.master_xpm_pv = self.xpm_base + str(master_xpm_num) + ':'

        self.ctxt = Context('pva')

        self.get_xpm_info()
        self.get_readout_group_mask()

        # unfortunately, the hsd needs the Rx link reset before the Tx,
        # otherwise we get CRC errors on the link.
        # try commenting this out since Matt has made the links more reliable
        #self.xpm_link_reset('Rx')
        #self.xpm_link_reset('Tx')

        # must come after clear readout because clear readout increments
        # the event counters, and the pgp eb needs them to start from zero
        # comment this out since it was moved to control.py
        #self.l0_count_reset()

        # enables listening to deadtime
        self.xpm_link_enable()

        self.ctxt.close()

    def get_readout_group_mask(self):
        self.readout_group_mask = 0
        for _, _, readout_group in self.xpm_info:
            self.readout_group_mask |= (1 << readout_group)

    def get_xpm_info(self):
        self.xpm_info = []
        # FIXME: cpo/weaver think this doesn't work for digitizers,
        # for example, where the DRP node can't learn which XPM port
        # is feeding it timing information.  Currently think we should
        # try to get the information from the XPM side, instead of the
        # drp side.
        for key, node_info in self.connect_info['body']['drp'].items():
            try:
                # FIXME: should have a better method to map xpm ip
                # address to xpm number (used to create pv names)
                xpm_id = int(node_info['connect_info']['xpm_id'])
                xpm_port = node_info['connect_info']['xpm_port']
                readout_group = node_info['det_info']['readout']
                self.xpm_info.append((xpm_id, xpm_port, readout_group))
            except KeyError:
                pass

    def xpm_link_disable(self, pv, groups):
        pv_names = []
        for xpm_port in range(14):
            pv_names.append(pv + 'RemoteLinkId' + str(xpm_port))
        print('link_ids: {:}'.format(pv_names))
        link_ids = self.ctxt.get(pv_names)

        pv_names = []
        downstream_xpm_names = []
        for xpm_port in range(14):
            pv_names.append(pv + 'LinkGroupMask' + str(xpm_port))
        link_masks = self.ctxt.get(pv_names)

        for i in range(14):
            xlink = xpm_link(link_ids[i])
            # this gets run for all xpm's "downstream" of the master xpm
            if xlink.is_xpm():
                downstream_xpm_names.append(self.xpm_base + str(xlink.xpm_num()))
                self.xpm_link_disable(self.xpm_base + str(xlink.xpm_num()) + ':', groups)
                link_masks[i] = 0xff  # xpm to xpm links should be enabled for everything
            else:
                link_masks[i] &= ~groups
        self.ctxt.put(pv_names, link_masks)

        # this code disables the "master" feature for each of the
        # downstream xpm's for the readout groups used by the new xpm master
        pv_names_downstream_xpm_master_enable = []
        for name in downstream_xpm_names:
            for igroup in range(8):
                if (1 << igroup) & groups:
                    pv_names_downstream_xpm_master_enable.append(name + ':PART:%d:Master' % igroup)
        num_master_disable = len(pv_names_downstream_xpm_master_enable)
        if (num_master_disable):
            print('*** Disable downstream xpm readout group master:',
                  pv_names_downstream_xpm_master_enable)
            self.ctxt.put(pv_names_downstream_xpm_master_enable,
                          [0] * num_master_disable)

    def xpm_link_disable_all(self):
        # Start from the master and recursively remove the groups from each downstream link
        self.xpm_link_disable(self.master_xpm_pv, self.readout_group_mask)

    def xpm_link_enable(self):
        self.xpm_link_disable_all()
        d = {}
        for xpm_num, xpm_port, readout_group in self.xpm_info:
            pvname = self.xpm_base + str(xpm_num) + ':' + 'LinkGroupMask' + str(xpm_port)
            if pvname in d:
                d[pvname] |= (1 << readout_group)
            else:
                d[pvname] = (1 << readout_group)
        pv_names = []
        values = []
        for name, value in d.items():
            pv_names.append(name)
            values.append(value)
        print('*** setting xpm link enables', pv_names, values)
        self.ctxt.put(pv_names, values)

    def xpm_link_reset(self, style):
        # make pv name that looks like DAQ:LAB2:XPM:1:RxLinkReset11
        # for xpm_num 1 and xpm_port 11
        pv_names = []
        for xpm_num, xpm_port, _ in self.xpm_info:
            pvname = self.xpm_base + str(xpm_num) + ':' + style + 'LinkReset' + str(xpm_port)
            pv_names.append(pvname)
        print('*** xpm link resetting', pv_names)
        self.ctxt.put(pv_names, len(pv_names) * [1])
        # unfortunately need to wait for the links to relock, which
        # matt says takes "an appreciable fraction of a second".
        # empirically, the links seem unreliable unless we wait 2s.
        time.sleep(2)

    def l0_count_reset(self):
        pvL0Reset = self.master_xpm_pv + 'GroupL0Reset'
        print('*** resetting l0 count', self.readout_group_mask)
        self.ctxt.put(pvL0Reset, self.readout_group_mask)