def __init__(self, init_file, name, enable_jitter=False, plot=False):
    self.name = name
    tao_lib = os.environ.get('TAO_LIB', '')
    self.tao = pytao.Tao(so_lib=tao_lib)
    L.debug("Initializing Tao...")
    if plot:
        self.tao.init("-init {init_file}".format(init_file=init_file))
    else:
        self.tao.init("-noplot -init {init_file}".format(init_file=init_file))
    L.debug("Tao initialization complete!")
    self.tao.cmd("set global lattice_calc_on = F")
    self.ctx = Context.instance()
    self.model_broadcast_socket = zmq.Context().socket(zmq.PUB)
    self.model_broadcast_socket.bind("tcp://*:{}".format(os.environ.get('MODEL_BROADCAST_PORT', 66666)))
    self.loop = asyncio.get_event_loop()
    self.jitter_enabled = enable_jitter
    self.twiss_table = NTTable([("element", "s"), ("device_name", "s"), ("s", "d"), ("length", "d"),
                                ("p0c", "d"),
                                ("alpha_x", "d"), ("beta_x", "d"), ("eta_x", "d"), ("etap_x", "d"), ("psi_x", "d"),
                                ("alpha_y", "d"), ("beta_y", "d"), ("eta_y", "d"), ("etap_y", "d"), ("psi_y", "d")])
    self.rmat_table = NTTable([("element", "s"), ("device_name", "s"), ("s", "d"), ("length", "d"),
                               ("r11", "d"), ("r12", "d"), ("r13", "d"), ("r14", "d"), ("r15", "d"), ("r16", "d"),
                               ("r21", "d"), ("r22", "d"), ("r23", "d"), ("r24", "d"), ("r25", "d"), ("r26", "d"),
                               ("r31", "d"), ("r32", "d"), ("r33", "d"), ("r34", "d"), ("r35", "d"), ("r36", "d"),
                               ("r41", "d"), ("r42", "d"), ("r43", "d"), ("r44", "d"), ("r45", "d"), ("r46", "d"),
                               ("r51", "d"), ("r52", "d"), ("r53", "d"), ("r54", "d"), ("r55", "d"), ("r56", "d"),
                               ("r61", "d"), ("r62", "d"), ("r63", "d"), ("r64", "d"), ("r65", "d"), ("r66", "d")])
    initial_twiss_table, initial_rmat_table = self.get_twiss_table()
    sec, nanosec = divmod(float(time.time()), 1.0)
    initial_twiss_table = self.twiss_table.wrap(initial_twiss_table)
    initial_twiss_table['timeStamp']['secondsPastEpoch'] = sec
    initial_twiss_table['timeStamp']['nanoseconds'] = nanosec
    initial_rmat_table = self.rmat_table.wrap(initial_rmat_table)
    initial_rmat_table['timeStamp']['secondsPastEpoch'] = sec
    initial_rmat_table['timeStamp']['nanoseconds'] = nanosec
    self.live_twiss_pv = SharedPV(nt=self.twiss_table, initial=initial_twiss_table, loop=self.loop)
    self.design_twiss_pv = SharedPV(nt=self.twiss_table, initial=initial_twiss_table, loop=self.loop)
    self.live_rmat_pv = SharedPV(nt=self.rmat_table, initial=initial_rmat_table, loop=self.loop)
    self.design_rmat_pv = SharedPV(nt=self.rmat_table, initial=initial_rmat_table, loop=self.loop)
    self.recalc_needed = False
    self.pva_needs_refresh = False
    self.need_zmq_broadcast = False
def testStoreUpdate1Bad(self):
    "Attempt to use the name of an existing config"
    oldidx = self.conn.execute(
        'insert into config(name, created, desc) values ("foo","","")'
    ).lastrowid
    conf = Value(
        NTTable.buildType([
            ('channelName', 'as'),
            ('readonly', 'a?'),
            ('groupName', 'as'),
            ('tags', 'as'),
        ]), {
            'labels': ['channelName', 'readonly', 'groupName', 'tags'],
            'value': {
                'channelName': ['one', 'two'],
                'readonly': [False, True],
                'groupName': ['A', ''],
                'tags': ['', 'a, b'],
            },
        })
    self.assertRaises(RemoteError,
                      self.S.storeServiceConfig,
                      configname='foo',
                      desc='desc',
                      config=conf,
                      system='xx')
def device_to_element(device_name, timeout=None):
    """Given a device name or list of device names, get the corresponding element name(s).

    Args:
        device_name (str or list of str): Device name(s) to convert to element
            name(s). You can also specify device name patterns, or a list of
            device name patterns to search for, using Oracle-style wildcard
            syntax (like "BPMS:BSYH:%") or regex patterns
            (like "BPMS:(BSYH|LTUH|UNDH):.*").

    Returns:
        str or list of str: An element name or list of element names.
    """
    was_single_string = False
    if isinstance(device_name, str):
        if re.search("[.^$*+?{}()[\],\\\/|%]", device_name) is None:
            # This is a plain-old device name, not a pattern.
            was_single_string = True
        device_name = [device_name]
    responses = []
    for devname in device_name:
        response = directory_service_get(timeout=timeout, dname=devname, show="ename")
        responses.extend([row['name'] for row in NTTable.unwrap(response)])
    flattened_responses = []
    for item in responses:
        if isinstance(item, list):
            for name in item:
                flattened_responses.append(name)
        else:
            flattened_responses.append(item)
    if was_single_string:
        return flattened_responses[0]
    return flattened_responses
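# Illustrative usage of device_to_element(), assuming the directory service is
# reachable; the specific names shown are hypothetical examples, not guaranteed
# responses.
#
#     device_to_element("BPMS:LI24:801")          # single name in -> single element name out
#     device_to_element("BPMS:(BSYH|LTUH):.*")    # a regex pattern -> list of element names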
def element_to_device(element_name, timeout=None):
    """Given an element name or list of element names, get the corresponding device name(s).

    Args:
        element_name (str or list of str): Element name(s) to convert to device
            name(s). Note: Unlike :func:`device_to_element()`, the directory
            service does not support wildcards or regex patterns when converting
            from element names to device names.

    Returns:
        str or list of str: A device name or list of device names.
    """
    was_single_string = False
    if isinstance(element_name, str):
        was_single_string = True
        element_name = [element_name]
    responses = []
    for elename in element_name:
        response = directory_service_get(timeout=timeout, ename=elename, show="dname")
        responses.extend([row['name'] for row in NTTable.unwrap(response)])
    flattened_responses = []
    for item in responses:
        if isinstance(item, list):
            for name in item:
                flattened_responses.append(name)
        else:
            flattened_responses.append(item)
    if was_single_string:
        return flattened_responses[0]
    return flattened_responses
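# Illustrative usage of element_to_device(), the inverse lookup; again the names
# shown are hypothetical examples.
#
#     element_to_device("BPM24801")                 # -> a single device name string
#     element_to_device(["BPM24801", "BPM24901"])   # -> a list of device names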
def testStoreUpdate2Bad(self):
    "Attempt to replace an inactive config"
    self.conn.executescript("""
        insert into config(id, name, next, created, desc) values (3,"foo",NULL,"","");
        insert into config(id, name, next, created, desc) values (2,"foo",3,"","");
    """)
    oldidx = 2
    conf = Value(
        NTTable.buildType([
            ('channelName', 'as'),
            ('readonly', 'a?'),
            ('groupName', 'as'),
            ('tags', 'as'),
        ]), {
            'labels': ['channelName', 'readonly', 'groupName', 'tags'],
            'value': {
                'channelName': ['one', 'two'],
                'readonly': [False, True],
                'groupName': ['A', ''],
                'tags': ['', 'a, b'],
            },
        })
    self.assertRaises(RemoteError,
                      self.S.storeServiceConfig,
                      configname='foo',
                      oldidx=str(oldidx),
                      desc='desc',
                      config=conf,
                      system='xx')
def addPVT(name, t):
    table, n = toTable(t)
    init = toDictList(t, n)
    pv = SharedPV(initial=NTTable(table).wrap(init),
                  handler=DefaultPVHandler())
    provider.add(name, pv)
    return pv
def testStoreUpdate(self):
    oldidx = self.conn.execute(
        'insert into config(name, created, desc) values ("foo","","")'
    ).lastrowid
    conf = Value(
        NTTable.buildType([
            ('channelName', 'as'),
            ('readonly', 'a?'),
            ('groupName', 'as'),
            ('tags', 'as'),
        ]), {
            'labels': ['channelName', 'readonly', 'groupName', 'tags'],
            'value': {
                'channelName': ['one', 'two'],
                'readonly': [False, True],
                'groupName': ['A', ''],
                'tags': ['', 'a, b'],
            },
        })
    R = self.S.storeServiceConfig(configname='foo',
                                  oldidx=str(oldidx),
                                  desc='desc',
                                  config=conf,
                                  system='xx')
    configid = int(R.value.config_idx[0])  # numpy.int32 -> int (so sqlite can bind it)
    self.assertListEqual([
        ('config_idx', numpy.asarray([configid], dtype='i4')),
        ('config_name', [u'foo']),
        ('config_desc', [u'desc']),
        ('config_create_date', [u'2017-01-28 21:43:28']),
        ('config_version', [u'0']),
        ('status', [u'active']),
        ('system', [u'xx']),
    ], R.value.tolist())
    self.assertNotEqual(oldidx, configid)
    self.assertListEqual(
        list(map(tuple,
                 self.conn.execute(
                     'select id, name, next from config order by id').fetchall())),
        [
            (oldidx, u'foo', configid),  # inactive
            (configid, u'foo', None),    # active
        ])
def storeTestConfig(self, **kws):
    conf = Value(
        NTTable.buildType([
            ('channelName', 'as'),
            ('readonly', 'a?'),
            ('groupName', 'as'),
            ('tags', 'as'),
        ]), {
            'labels': ['channelName', 'readonly', 'groupName', 'tags'],
            'value': {
                'channelName': ['pv:f64:1', 'pv:i32:2', 'pv:str:3', 'pv:bad:4'],
                'readonly': [False, False, False, False],
                'groupName': ['A', '', '', ''],
                'tags': ['', 'a, b', '', ''],
            },
        })
    return self.storeServiceConfig(config=conf, **kws)
def __init__(self):
    tao_lib = os.environ.get('TAO_LIB', '')
    self.tao = pytao.Tao(so_lib=tao_lib)
    path_to_lattice = os.path.join(os.path.dirname(os.path.realpath(__file__)), "lcls.lat")
    path_to_init = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tao.init")
    self.tao.init("-noplot -lat {lat_path} -init {init_path}".format(lat_path=path_to_lattice,
                                                                     init_path=path_to_init))
    self.ctx = Context.instance()
    self.model_broadcast_socket = zmq.Context().socket(zmq.PUB)
    self.model_broadcast_socket.bind("tcp://*:{}".format(os.environ.get('MODEL_BROADCAST_PORT', 66666)))
    self.loop = asyncio.get_event_loop()
    model_table = NTTable([("element", "s"), ("device_name", "s"), ("s", "d"), ("length", "d"),
                           ("p0c", "d"),
                           ("alpha_x", "d"), ("beta_x", "d"), ("eta_x", "d"), ("etap_x", "d"), ("psi_x", "d"),
                           ("alpha_y", "d"), ("beta_y", "d"), ("eta_y", "d"), ("etap_y", "d"), ("psi_y", "d"),
                           ("r11", "d"), ("r12", "d"), ("r13", "d"), ("r14", "d"), ("r15", "d"), ("r16", "d"),
                           ("r21", "d"), ("r22", "d"), ("r23", "d"), ("r24", "d"), ("r25", "d"), ("r26", "d"),
                           ("r31", "d"), ("r32", "d"), ("r33", "d"), ("r34", "d"), ("r35", "d"), ("r36", "d"),
                           ("r41", "d"), ("r42", "d"), ("r43", "d"), ("r44", "d"), ("r45", "d"), ("r46", "d"),
                           ("r51", "d"), ("r52", "d"), ("r53", "d"), ("r54", "d"), ("r55", "d"), ("r56", "d"),
                           ("r61", "d"), ("r62", "d"), ("r63", "d"), ("r64", "d"), ("r65", "d"), ("r66", "d")])
    initial_table = self.get_twiss_table()
    self.live_twiss_pv = SharedPV(nt=model_table, initial=initial_table, loop=self.loop)
    self.design_twiss_pv = SharedPV(nt=model_table, initial=initial_table, loop=self.loop)
    self.pva_needs_refresh = False
    self.need_zmq_broadcast = False
def setUp(self):
    self.conn = connect(':memory:')
    self.S = Service(conn=self.conn, gather=self.gather, sim=True)
    self.S.simtime = (2017, 1, 28, 21, 43, 28, 5, 28, 0)
    conf = Value(
        NTTable.buildType([
            ('channelName', 'as'),
            ('readonly', 'a?'),
            ('groupName', 'as'),
            ('tags', 'as'),
        ]), {
            'labels': ['channelName', 'readonly', 'groupName', 'tags'],
            'value': {
                'channelName': ['one'],
                'readonly': [False],
                'groupName': [''],
                'tags': [''],
            },
        })
    R = self.S.storeServiceConfig(configname='first', desc='desc', config=conf, system='xx')
    self.configid = int(R.value.config_idx[0])  # numpy.int32 -> int (so sqlite can bind it)
    S = self.S.saveSnapshot(configname='first', user='******', desc='bar')
    self.eid1 = S['timeStamp']['userTag']
    self.S.simtime = (2017, 1, 28, 21, 44, 28, 5, 28, 0)
    S = self.S.saveSnapshot(configname='first', user='******', desc='bar')
    self.eid2 = S['timeStamp']['userTag']
    self.S.simtime = (2017, 1, 28, 21, 45, 28, 5, 28, 0)
    S = self.S.saveSnapshot(configname='first', user='******', desc='bar')
    self.eid3 = S['timeStamp']['userTag']
def _list(pattern, tag=None, sort_by=None, element_type=None, show=None, timeout=None):
    """Gets a list of PVs, device names, or element names from the directory service.

    Args:
        pattern (str): A pattern to search for. The pattern can use an Oracle-style
            wildcard syntax, like this: "BPMS:BSY:%:X", where % is the wildcard
            symbol, or a regular expression, like this: "(XCOR|BPMS):.*".
        tag (str, optional): A tag to filter the results by. Tags come from the
            model system, and are used to group a region of devices. Some
            commonly-used tags: L1, L2, L3, BSY, LTU, UND, DUMPLINE.
        sort_by (str, optional): A property to sort the results by. sort_by="z"
            is very commonly used.
        element_type (str, optional): An element type to filter the results by.
            For example, you can show only instruments by passing element_type="INST".
        show (str, optional): The type of name to return. When 'show' is not
            specified, a list of PVs (something like "BPMS:LI24:801:X") will be
            returned. When 'show' is 'dname', a list of device names (something
            like "BPMS:LI24:801") will be returned. When 'show' is 'ename', a
            list of element names (something like "BPM24801") will be returned.

    Returns:
        list of str: A list of names matching the parameters sent.
    """
    response = directory_service_get(timeout=timeout, name=pattern, tag=tag,
                                     sort=sort_by, etype=element_type, show=show)
    return [row['name'] for row in NTTable.unwrap(response)]
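# Illustrative usage of _list(), assuming the directory service is reachable;
# the pattern and tag values are examples only.
#
#     _list("BPMS:BSY:%:X", sort_by="z")                 # PV names, sorted by z position
#     _list("(XCOR|BPMS):.*", tag="LTU", show="ename")   # element names in the LTU region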
class ModelService:
    def __init__(self, init_file, name, enable_jitter=False, plot=False):
        self.name = name
        tao_lib = os.environ.get('TAO_LIB', '')
        self.tao = pytao.Tao(so_lib=tao_lib)
        L.debug("Initializing Tao...")
        if plot:
            self.tao.init("-init {init_file}".format(init_file=init_file))
        else:
            self.tao.init("-noplot -init {init_file}".format(init_file=init_file))
        L.debug("Tao initialization complete!")
        self.tao.cmd("set global lattice_calc_on = F")
        self.tao.cmd('set global var_out_file = " "')
        self.ctx = Context.instance()
        self.model_broadcast_socket = zmq.Context().socket(zmq.PUB)
        self.model_broadcast_socket.bind("tcp://*:{}".format(os.environ.get('MODEL_BROADCAST_PORT', 66666)))
        self.loop = asyncio.get_event_loop()
        self.jitter_enabled = enable_jitter
        self.twiss_table = NTTable([("element", "s"), ("device_name", "s"), ("s", "d"), ("length", "d"),
                                    ("p0c", "d"),
                                    ("alpha_x", "d"), ("beta_x", "d"), ("eta_x", "d"), ("etap_x", "d"), ("psi_x", "d"),
                                    ("alpha_y", "d"), ("beta_y", "d"), ("eta_y", "d"), ("etap_y", "d"), ("psi_y", "d")])
        self.rmat_table = NTTable([("element", "s"), ("device_name", "s"), ("s", "d"), ("length", "d"),
                                   ("r11", "d"), ("r12", "d"), ("r13", "d"), ("r14", "d"), ("r15", "d"), ("r16", "d"),
                                   ("r21", "d"), ("r22", "d"), ("r23", "d"), ("r24", "d"), ("r25", "d"), ("r26", "d"),
                                   ("r31", "d"), ("r32", "d"), ("r33", "d"), ("r34", "d"), ("r35", "d"), ("r36", "d"),
                                   ("r41", "d"), ("r42", "d"), ("r43", "d"), ("r44", "d"), ("r45", "d"), ("r46", "d"),
                                   ("r51", "d"), ("r52", "d"), ("r53", "d"), ("r54", "d"), ("r55", "d"), ("r56", "d"),
                                   ("r61", "d"), ("r62", "d"), ("r63", "d"), ("r64", "d"), ("r65", "d"), ("r66", "d")])
        initial_twiss_table, initial_rmat_table = self.get_twiss_table()
        sec, nanosec = divmod(float(time.time()), 1.0)
        initial_twiss_table = self.twiss_table.wrap(initial_twiss_table)
        initial_twiss_table['timeStamp']['secondsPastEpoch'] = sec
        initial_twiss_table['timeStamp']['nanoseconds'] = nanosec
        initial_rmat_table = self.rmat_table.wrap(initial_rmat_table)
        initial_rmat_table['timeStamp']['secondsPastEpoch'] = sec
        initial_rmat_table['timeStamp']['nanoseconds'] = nanosec
        self.live_twiss_pv = SharedPV(nt=self.twiss_table, initial=initial_twiss_table, loop=self.loop)
        self.design_twiss_pv = SharedPV(nt=self.twiss_table, initial=initial_twiss_table, loop=self.loop)
        self.live_rmat_pv = SharedPV(nt=self.rmat_table, initial=initial_rmat_table, loop=self.loop)
        self.design_rmat_pv = SharedPV(nt=self.rmat_table, initial=initial_rmat_table, loop=self.loop)
        self.recalc_needed = False
        self.pva_needs_refresh = False
        self.need_zmq_broadcast = False

    def start(self):
        L.info("Starting %s Model Service.", self.name)
        pva_server = PVAServer(providers=[{
            f"SIMULACRUM:SYS0:1:{self.name}:LIVE:TWISS": self.live_twiss_pv,
            f"SIMULACRUM:SYS0:1:{self.name}:DESIGN:TWISS": self.design_twiss_pv,
            f"SIMULACRUM:SYS0:1:{self.name}:LIVE:RMAT": self.live_rmat_pv,
            f"SIMULACRUM:SYS0:1:{self.name}:DESIGN:RMAT": self.design_rmat_pv,
        }])
        try:
            zmq_task = self.loop.create_task(self.recv())
            pva_refresh_task = self.loop.create_task(self.refresh_pva_table())
            broadcast_task = self.loop.create_task(self.broadcast_model_changes())
            jitter_task = self.loop.create_task(self.add_jitter())
            self.loop.run_forever()
        except KeyboardInterrupt:
            L.info("Shutting down Model Service.")
            zmq_task.cancel()
            pva_refresh_task.cancel()
            broadcast_task.cancel()
            pva_server.stop()
        finally:
            self.loop.close()
            L.info("Model Service shutdown complete.")

    def get_twiss_table(self):
        """Queries Tao for model and RMAT info.

        Returns:
            A (twiss_table, rmat_table) tuple.
        """
        start_time = time.time()
        # First we get a list of all the elements.
        # NOTE: the "-no_slaves" option for python lat_list only works in Tao 2019_1112 or above.
        element_name_list = self.tao.cmd("python lat_list -track_only 1@0>>*|model ele.name")
        L.debug(element_name_list)
        for row in element_name_list:
            assert "ERROR" not in row, "Fetching element names failed. This is probably because a version of Tao older than 2019_1112 is being used."
        last_element_index = 0
        for i, row in enumerate(reversed(element_name_list)):
            if row == "END":
                last_element_index = len(element_name_list) - 1 - i
                break
        element_data = {}
        attrs = ("ele.s", "ele.l", "orbit.energy", "ele.a.alpha", "ele.a.beta", "ele.x.eta", "ele.x.etap",
                 "ele.a.phi", "ele.b.alpha", "ele.b.beta", "ele.y.eta", "ele.y.etap", "ele.b.phi", "ele.mat6")
        for attr in attrs:
            element_data[attr] = self.tao.cmd_real("python lat_list -track_only 1@0>>*|model real:{}".format(attr))
            if attr == 'ele.mat6':
                element_data[attr] = element_data[attr].reshape((-1, 6, 6))
            assert len(element_data[attr]) == len(element_name_list), \
                "Number of elements in model data for {} doesn't match number of element names.".format(attr)
        combined_rmat = np.identity(6)
        twiss_table_rows = []
        rmat_table_rows = []
        for i in range(0, last_element_index + 1):
            element_name = element_name_list[i]
            try:
                device_name = simulacrum.util.convert_element_to_device(element_name.split("#")[0])
            except KeyError:
                device_name = ""
            element_rmat = element_data['ele.mat6'][i]
            rmat = np.matmul(element_rmat, combined_rmat)
            combined_rmat = rmat
            twiss_table_rows.append({
                "element": element_name,
                "device_name": device_name,
                "s": element_data['ele.s'][i],
                "length": element_data['ele.l'][i],
                "p0c": element_data['orbit.energy'][i],
                "alpha_x": element_data['ele.a.alpha'][i],
                "beta_x": element_data['ele.a.beta'][i],
                "eta_x": element_data['ele.x.eta'][i],
                "etap_x": element_data['ele.x.etap'][i],
                "psi_x": element_data['ele.a.phi'][i],
                "alpha_y": element_data['ele.b.alpha'][i],
                "beta_y": element_data['ele.b.beta'][i],
                "eta_y": element_data['ele.y.eta'][i],
                "etap_y": element_data['ele.y.etap'][i],
                "psi_y": element_data['ele.b.phi'][i]
            })
            rmat_table_rows.append({
                "element": element_name,
                "device_name": device_name,
                "s": element_data['ele.s'][i],
                "length": element_data['ele.l'][i],
                "r11": rmat[0, 0], "r12": rmat[0, 1], "r13": rmat[0, 2], "r14": rmat[0, 3], "r15": rmat[0, 4], "r16": rmat[0, 5],
                "r21": rmat[1, 0], "r22": rmat[1, 1], "r23": rmat[1, 2], "r24": rmat[1, 3], "r25": rmat[1, 4], "r26": rmat[1, 5],
                "r31": rmat[2, 0], "r32": rmat[2, 1], "r33": rmat[2, 2], "r34": rmat[2, 3], "r35": rmat[2, 4], "r36": rmat[2, 5],
                "r41": rmat[3, 0], "r42": rmat[3, 1], "r43": rmat[3, 2], "r44": rmat[3, 3], "r45": rmat[3, 4], "r46": rmat[3, 5],
                "r51": rmat[4, 0], "r52": rmat[4, 1], "r53": rmat[4, 2], "r54": rmat[4, 3], "r55": rmat[4, 4], "r56": rmat[4, 5],
                "r61": rmat[5, 0], "r62": rmat[5, 1], "r63": rmat[5, 2], "r64": rmat[5, 3], "r65": rmat[5, 4], "r66": rmat[5, 5]
            })
        end_time = time.time()
        L.debug("get_twiss_table took %f seconds", end_time - start_time)
        return twiss_table_rows, rmat_table_rows

    async def refresh_pva_table(self):
        """This loop continuously checks if the PVAccess table needs to be refreshed,
        and publishes a new table if it does. The pva_needs_refresh flag is usually
        set when a tao command beginning with 'set' occurs.
        """
        while True:
            if self.pva_needs_refresh:
                sec, nanosec = divmod(float(time.time()), 1.0)
                new_twiss_table, new_rmat_table = self.get_twiss_table()
                new_twiss_table = self.twiss_table.wrap(new_twiss_table)
                new_twiss_table['timeStamp']['secondsPastEpoch'] = sec
                new_twiss_table['timeStamp']['nanoseconds'] = nanosec
                new_rmat_table = self.rmat_table.wrap(new_rmat_table)
                new_rmat_table['timeStamp']['secondsPastEpoch'] = sec
                new_rmat_table['timeStamp']['nanoseconds'] = nanosec
                self.live_twiss_pv.post(new_twiss_table)
                self.live_rmat_pv.post(new_rmat_table)
                self.pva_needs_refresh = False
            await asyncio.sleep(1.0)

    async def add_jitter(self):
        while True:
            if self.jitter_enabled:
                x0 = np.random.normal(0.0, 0.12 * 0.001)
                y0 = np.random.normal(0.0, 0.12 * 0.001)
                self.tao.cmd(f"set particle_start x = {x0}")
                self.tao.cmd(f"set particle_start y = {y0}")
                self.recalc_needed = True
                self.need_zmq_broadcast = True
            await asyncio.sleep(1.0)

    async def broadcast_model_changes(self):
        """This loop broadcasts new orbits, twiss parameters, etc. over ZMQ."""
        while True:
            if self.recalc_needed:
                self.tao.cmd("set global lattice_calc_on = T")
                self.tao.cmd("set global lattice_calc_on = F")
                self.recalc_needed = False
            if self.need_zmq_broadcast:
                try:
                    self.send_orbit()
                except Exception as e:
                    L.warning("SEND ORBIT FAILED: %s", e)
                try:
                    self.send_profiles_data()
                except Exception as e:
                    L.warning("SEND PROF DATA FAILED: %s", e)
                try:
                    self.send_und_twiss()
                except Exception as e:
                    L.warning("SEND UND TWISS FAILED: %s", e)
                self.need_zmq_broadcast = False
            await asyncio.sleep(0.1)

    def model_changed(self):
        self.recalc_needed = True
        self.pva_needs_refresh = True
        self.need_zmq_broadcast = True

    def get_orbit(self):
        start_time = time.time()
        # Get X Orbit
        x_orb_text = self.tao_cmd("show data orbit.x")[3:-2]
        x_orb = _orbit_array_from_text(x_orb_text)
        # Get Y Orbit
        y_orb_text = self.tao_cmd("show data orbit.y")[3:-2]
        y_orb = _orbit_array_from_text(y_orb_text)
        # Get e_tot, which we use to see if the single particle beam is dead
        e_text = self.tao_cmd("show data orbit.e")[3:-2]
        e = _orbit_array_from_text(e_text)
        end_time = time.time()
        L.debug("get_orbit took %f seconds", end_time - start_time)
        return np.stack((x_orb, y_orb, e))

    def get_prof_orbit(self):
        # Get X Orbit
        x_orb_text = self.tao_cmd("show data orbit.profx")[3:-2]
        x_orb = _orbit_array_from_text(x_orb_text)
        # Get Y Orbit
        y_orb_text = self.tao_cmd("show data orbit.profy")[3:-2]
        y_orb = _orbit_array_from_text(y_orb_text)
        return np.stack((x_orb, y_orb))

    def get_twiss(self):
        twiss_text = self.tao_cmd("show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b UNDSTART")
        if "ERROR" in twiss_text[0]:
            twiss_text = self.tao_cmd("show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b BEGUNDH")
        if "ERROR" in twiss_text[0]:
            twiss_text = self.tao_cmd("show lat -no_label_lines -at alpha_a -at beta_a -at alpha_b -at beta_b BEGUNDS")
        # format to list of comma separated values
        #msg='twiss from get_twiss: {}'.format(twiss_text)
        #L.info(msg)
        twiss = twiss_text[0].split()
        return twiss

    def old_get_orbit(self):
        # Get X Orbit
        x_orb_text = self.tao_cmd("python lat_list 1@0>>BPM*|model orbit.vec.1")
        x_orb = _orbit_array_from_text(x_orb_text)
        # Get Y Orbit
        y_orb_text = self.tao_cmd("python lat_list 1@0>>BPM*|model orbit.vec.3")
        y_orb = _orbit_array_from_text(y_orb_text)
        return np.stack((x_orb, y_orb))

    # Information broadcast by the model is sent as two separate messages:
    # metadata message: sent first with 1) tag describing data for services to filter on, 2) type - optional, 3) size - optional
    # data message: sent either as a python object or a series of bits
    def send_orbit(self):
        orb = self.get_orbit()
        metadata = {"tag": "orbit", "dtype": str(orb.dtype), "shape": orb.shape}
        self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)
        self.model_broadcast_socket.send(orb)

    def send_profiles_data(self):
        twiss_text = self.tao_cmd("show lat -no_label_lines -at beta_a -at beta_b -at e_tot Monitor::OTR*,Monitor::YAG*")
        prof_beta_x = [float(l.split()[5]) for l in twiss_text]
        prof_beta_y = [float(l.split()[6]) for l in twiss_text]
        prof_e = [float(l.split()[7]) for l in twiss_text]
        prof_names = [l.split()[1] for l in twiss_text]
        prof_orbit = self.get_prof_orbit()
        prof_data = np.concatenate((prof_orbit, np.array([prof_beta_x, prof_beta_y, prof_e, prof_names])))
        metadata = {"tag": "prof_data", "dtype": str(prof_data.dtype), "shape": prof_data.shape}
        self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)
        self.model_broadcast_socket.send(prof_data)

    def send_particle_positions(self):
        twiss_text = self.tao_cmd("show lat -no_label_lines -at beta_a -at beta_b -at e_tot Monitor::OTR*,Monitor::YAG*")
        prof_names = [l.split()[1] for l in twiss_text]
        positions_all = {}
        for screen in prof_names:
            positions = self.get_particle_positions(screen)
            if not positions:
                continue
            positions_all[screen] = [[float(position.split()[1]), float(position.split()[3])]
                                     for position in positions]
        metadata = {"tag": "part_positions"}
        self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)
        self.model_broadcast_socket.send_pyobj(positions_all)

    def get_particle_positions(self, screen):
        L.debug("Getting particle positions")
        cmd = "show particle -all -ele {screen}".format(screen=screen)
        results = self.tao_cmd(cmd)
        if len(results) < 3:
            return False
        return results[2:]

    def send_und_twiss(self):
        twiss = self.get_twiss()
        metadata = {"tag": "und_twiss"}
        self.model_broadcast_socket.send_pyobj(metadata, zmq.SNDMORE)
        self.model_broadcast_socket.send_pyobj(twiss)

    def tao_cmd(self, cmd):
        if cmd.startswith("exit"):
            return "Please stop trying to exit the model service's Tao, you jerk!"
        result = self.tao.cmd(cmd)
        if cmd.startswith("set"):
            self.model_changed()
        return result

    def tao_batch(self, cmds):
        L.info("Starting command batch.")
        results = [self.tao_cmd(cmd) for cmd in cmds]
        L.info("Batch complete.")
        return results

    async def recv(self):
        s = self.ctx.socket(zmq.REP)
        s.bind("tcp://*:{}".format(os.environ.get('MODEL_PORT', "12312")))
        while True:
            p = await s.recv_pyobj()
            msg = "Got a message: {}".format(p)
            L.debug(msg)
            if p['cmd'] == 'tao':
                try:
                    retval = self.tao_cmd(p['val'])
                    await s.send_pyobj({'status': 'ok', 'result': retval})
                except Exception as e:
                    await s.send_pyobj({'status': 'fail', 'err': e})
            elif p['cmd'] == 'send_orbit':
                self.model_changed()  # Sets the flag that will cause an orbit broadcast
                await s.send_pyobj({'status': 'ok'})
            elif p['cmd'] == 'echo':
                await s.send_pyobj({'status': 'ok', 'result': p['val']})
            elif p['cmd'] == 'send_profiles_twiss':
                self.model_changed()  # Sets the flag that will cause a prof broadcast
                #self.send_profiles_twiss()
                #self.send_prof_orbit()
                await s.send_pyobj({'status': 'ok'})
            elif p['cmd'] == 'send_und_twiss':
                self.model_changed()  # Sets the flag that will cause an und twiss broadcast
                #self.send_und_twiss()
                await s.send_pyobj({'status': 'ok'})
            elif p['cmd'] == 'tao_batch':
                try:
                    results = self.tao_batch(p['val'])
                    await s.send_pyobj({'status': 'ok', 'result': results})
                except Exception as e:
                    await s.send_pyobj({'status': 'fail', 'err': e})
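# A minimal client-side sketch (not part of the service itself) showing how the
# REQ/REP and PUB/SUB sockets set up above can be exercised; the host name and
# ports are assumptions based on the defaults used in this file.
#
#     import zmq
#
#     ctx = zmq.Context()
#
#     # Send a Tao command through the REP socket served by ModelService.recv().
#     req = ctx.socket(zmq.REQ)
#     req.connect("tcp://localhost:12312")
#     req.send_pyobj({'cmd': 'tao', 'val': 'show lat -no_label_lines BPMS*'})
#     print(req.recv_pyobj())  # -> {'status': 'ok', 'result': [...]}
#
#     # Listen for the two-part broadcasts (metadata dict, then payload).
#     sub = ctx.socket(zmq.SUB)
#     sub.connect("tcp://localhost:66666")
#     sub.setsockopt_string(zmq.SUBSCRIBE, "")
#     metadata = sub.recv_pyobj()
#     payload = sub.recv()  # raw bytes for 'orbit'; 'und_twiss' and 'part_positions' are sent as pyobj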
def main(self):
    cli = Context()
    pvs = {}
    # table of detected "features"
    self.features = pvs[args.output + 'features'] = SharedPV(
        nt=NTTable(columns=[
            ('X', 'd'),
            ('Y', 'd'),
            ('W', 'd'),
            ('H', 'd'),
            ('idx', 'd'),
        ]),
        initial=[])
    # output image (example)
    self.imgOut = pvs[args.output + 'img'] = SharedPV(nt=NTNDArray(),
                                                      initial=np.zeros((0, 0), dtype='u1'))
    # display execution time
    self.execTime = pvs[args.output + 'etime'] = SharedPV(
        nt=NTScalar('d', display=True),
        initial={
            'value': 0.0,
            'display.units': 's',
        })
    # background threshold level
    bg = pvs[args.output + 'bg'] = SharedPV(nt=NTScalar('I', display=True),
                                            initial={
                                                'value': self.bgLvl,
                                                'display.units': 'px',
                                            })

    @bg.put
    def set_bg(pv, op):
        self.bgLvl = max(1, int(op.value()))
        pv.post(self.bgLvl)
        op.done()

    # image flattening mode
    imode = pvs[args.output + 'imode'] = SharedPV(
        nt=NTEnum(), initial={'choices': [e.name for e in ImageMode]})

    @imode.put
    def set_imode(pv, op):
        self.imode = ImageMode(op.value())
        pv.post(self.imode)
        op.done()

    # separately publish info of largest feature
    self.X = pvs[args.output + 'x'] = SharedPV(nt=NTScalar('d'), initial=0.0)
    self.Y = pvs[args.output + 'y'] = SharedPV(nt=NTScalar('d'), initial=0.0)
    self.W = pvs[args.output + 'w'] = SharedPV(nt=NTScalar('d'), initial=0.0)
    self.H = pvs[args.output + 'h'] = SharedPV(nt=NTScalar('d'), initial=0.0)

    print("Output PVs", list(pvs.keys()))
    # subscribe to input image PV and run local server
    with cli.monitor(self.args.input, self.on_image, request='record[pipeline=true,queueSize=2]'), \
         Server(providers=[pvs]):
        # park while work happens in other tasks
        done = threading.Event()
        signal.signal(signal.SIGINT, lambda x, y: done.set())
        done.wait()
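# A hedged client-side sketch (not part of this script): the PVs published above
# can be read and adjusted with an ordinary p4p client context. The prefix
# 'CAM:OUT:' stands in for whatever args.output is set to at runtime.
#
#     from p4p.client.thread import Context
#     ctx = Context('pva')
#     ctx.put('CAM:OUT:bg', 24)               # handled by the @bg.put callback above
#     features = ctx.get('CAM:OUT:features')  # NTTable of detected features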
def testStoreFirst(self):
    conf = Value(
        NTTable.buildType([
            ('channelName', 'as'),
            ('readonly', 'a?'),
            ('groupName', 'as'),
            ('tags', 'as'),
        ]), {
            'labels': ['channelName', 'readonly', 'groupName', 'tags'],
            'value': {
                'channelName': ['one', 'two'],
                'readonly': [False, True],
                'groupName': ['A', ''],
                'tags': ['', 'a, b'],
            },
        })

    ######### Store
    R = self.S.storeServiceConfig(configname='first', desc='desc', config=conf, system='xx')
    self.assertIsInstance(R, Value)
    self.assertListEqual(R.labels, [
        u'config_idx', u'config_name', u'config_desc', u'config_create_date',
        u'config_version', u'status', u'system'
    ])
    configid = int(R.value.config_idx[0])  # numpy.int32 -> int (so sqlite can bind it)
    self.assertListEqual([
        ('config_idx', numpy.asarray([configid], dtype='i4')),
        ('config_name', [u'first']),
        ('config_desc', [u'desc']),
        ('config_create_date', [u'2017-01-28 21:43:28']),
        ('config_version', [u'0']),
        ('status', [u'active']),
        ('system', [u'xx']),
    ], R.value.tolist())

    ######### verify DB
    R = self.conn.execute('select * from config where id is not NULL;').fetchone()
    self.assertEqual(R['id'], configid)
    self.assertEqual(R['name'], 'first')
    self.assertIsNone(R['next'])
    R = list(
        map(tuple,
            self.conn.execute(
                'select name, readonly, groupName, tags from config_pv where config=?;',
                (configid, )).fetchall()))
    self.assertListEqual(R, [
        (u'one', 0, u'A', u''),
        (u'two', 1, u'', u'a, b'),
    ])

    ######### Load
    R = self.S.loadServiceConfig(configid=str(configid))
    self.assertIsInstance(R, Value)
    self.assertListEqual(R.labels, conf.labels)
    self.assertListEqual(R.value.channelName, conf.value.channelName)
    assert_equal(R.value.readonly, conf.value.readonly)
    self.assertListEqual(R.value.groupName, conf.value.groupName)
    self.assertListEqual(R.value.tags, conf.value.tags)

    ######### Query
    R = self.S.retrieveServiceConfigs(configname='*')
    self.assertIsInstance(R, Value)
    self.assertListEqual(R.labels, [
        u'config_idx', u'config_name', u'config_desc', u'config_create_date',
        u'config_version', u'status', u'system'
    ])
    self.assertListEqual(R.value.tolist(), [
        ('config_idx', numpy.asarray([configid])),
        ('config_name', [u'first']),
        ('config_desc', [u'desc']),
        ('config_create_date', [u'2017-01-28 21:43:28']),
        ('config_version', [u'0']),
        ('status', [u'active']),
        ('system', [u'xx']),
    ])

    R = self.S.retrieveServiceConfigProps()
    self.assertIsInstance(R, Value)
    self.assertListEqual(R.labels, [u'config_prop_id', u'config_idx', u'system_key', u'system_val'])
    self.assertListEqual(R.value.tolist(), [
        ('config_prop_id', numpy.asarray([1], dtype=numpy.int32)),
        ('config_idx', numpy.asarray([configid], dtype=numpy.int32)),
        ('system_key', [u'system']),
        ('system_val', [u'xx']),
    ])
from p4p import Type, Value
from p4p.nt import NTTable
from p4p.rpc import rpc, quickRPCServer

single_pv_struct = NTTable([("secondsPastEpoch", "l"), ("values", "d"), ("nanoseconds", "i"),
                            ("severity", "i"), ("status", "i")])
"""
pv_data = pvaccess.PvObject({"value": {"pvName": pvaccess.STRING, "value": single_pv_struct}},
                            {"value": {"pvName": pv, "value": single_pv_value}})
"""
multi_pv_struct = Type([
    ("pvName", "s"),
    ("value", ("S", "NTComplexTable", single_pv_struct.type.items()))
])
multi_response_struct = Type([
    ("value", "av")
])


class ArchiveTester(object):
    @rpc(None)
    def hist(self, **kws):
        pv = kws['pv']
        value = [{"secondsPastEpoch": 1, "values": 123.45, "nanoseconds": 0, "severity": 0, "status": 0}]
        pv_list = pv.split(",")
        if len(pv_list) == 1:
            return single_pv_struct.wrap(value)
        else:
            result_list = []
            for p in pv_list:
                pv_data = Value(multi_pv_struct, {"pvName": p, "value": single_pv_struct.wrap(value)})
                result_list.append(pv_data)
            return Value(multi_response_struct, {"value": result_list})
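# A hedged sketch (not in the original snippet) of serving ArchiveTester with the
# same quickRPCServer pattern used by the Directory Service test server below;
# the provider name here is an arbitrary placeholder.
#
#     print("Starting Archive Test Server!")
#     quickRPCServer(provider="Archive Test Server", prefix="", target=ArchiveTester())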
class Service(object):
    def __init__(self, conn, gather=None, sim=False):
        self.conn = conn
        self.gather = gather
        if not sim:
            # current time string (UTC)
            self.now = lambda: time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
        else:
            # posix time 1485639808
            self.simtime = (2017, 1, 28, 21, 43, 28, 5, 28, 0)
            self.now = lambda self=self: time.strftime('%Y-%m-%d %H:%M:%S', self.simtime)

    def _getConfig(self, C):
        # expect previous 'select' with: id, name, created, active, next, desc, system
        ret = []
        for id, name, created, active, next, desc, system in C.fetchall():
            ret.append({
                'config_idx': id,
                'config_name': name,
                'config_create_date': created,
                'config_desc': desc,
                'config_version': u'0',
                'status': 'active' if next is None and active != 0 else 'inactive',
                'system': system or '',
            })
        return ret

    @rpc(configTable)
    def storeServiceConfig(self, configname, oldidx=None, desc='', config=None, system=None):
        with self.conn as conn:
            C = conn.cursor()
            _log.debug("storeServiceConfig() %s(%s)", configname, oldidx)
            C.execute('insert into config(name, desc, created, system) values (?,?,?,?);',
                      (configname, desc, self.now(), system))
            newidx = C.lastrowid
            if oldidx is not None:
                # update existing row only if it was previously active
                C.execute('update config set next=? where id=? and next is NULL',
                          (newidx, int(oldidx)))
            if C.execute('select count(*) from config where name=? and next is NULL and active=1',
                         (configname, )).fetchone()[0] != 1:
                # rollback will undo our insert
                raise RemoteError("Provided configname and oldidx ('%s' and %s) are not consistent" %
                                  (configname, oldidx))
            for name, ro, group, tags in izip_longest(
                    config.value.channelName,
                    config.get('value.readonly', []),
                    config.get('value.groupName', []),
                    config.get('value.tags', []),
            ):
                C.execute('insert into config_pv(config, name, readonly, groupName, tags) VALUES (?,?,?,?,?);',
                          (newidx, name, int(ro or 0), group or '', tags or ''))
            C.execute("""select id, name, created, active, next, desc, system
                         from config
                         where id=?;
                      """, (newidx, ))
            _log.info("Store configuration %s as %s (old %s)", configname, newidx, oldidx)
            return self._getConfig(C)

    @rpc(NTTable([
        ('channelName', 's'),
        ('readonly', '?'),
        ('groupName', 's'),
        ('tags', 's'),
    ]))
    def loadServiceConfig(self, configid=None):
        configid = int(configid)
        with self.conn as conn:
            C = conn.cursor()
            R = C.execute('select id from config where id=?', (configid, )).fetchone()
            if R is None:
                raise RemoteError("Unknown configid %s" % configid)
            C.execute('select name as channelName, tags, groupName, readonly from config_pv where config=?;',
                      (int(configid), ))
            _log.debug("Fetch configuration %s", configid)
            return C.fetchall()

    @rpc(configTable)
    def modifyServiceConfig(self, configid=None, status=None):
        configid = int(configid)
        if status not in (None, 'active', 'inactive'):
            raise RemoteError("Unsupported status '%s'" % status)
        with self.conn as conn:
            C = conn.cursor()
            R = C.execute('select active, next from config where id=?', (int(configid), )).fetchone()
            if R is None:
                raise RemoteError("Unknown configid %s" % configid)
            if R['next'] is not None:
                raise RemoteError("Can't modify superseded configuration")
            cond = []
            vals = [configid]
            if status == 'active':
                cond.append('active=1')
            elif status == 'inactive':
                cond.append('active=0')
            if len(cond) > 0:
                cond = ' and '.join(cond)
                C.execute('update config set %s where id=? and next is NULL' % cond, vals)
            C.execute("""select id, name, created, active, next, desc, system
                         from config
                         where id=?;
                      """, (configid, ))
            return self._getConfig(C)

    @rpc(configTable)
    def retrieveServiceConfigs(self, servicename=None, configname=None, configversion=None,
                               system=None, eventid=None, status=None):
        if servicename not in (None, u'', u'masar'):
            _log.warning("Service names not supported")
            return []
        if status not in (None, u'', u'active', u'inactive'):
            _log.warning("Request matching unknown status %s", status)
            return []
        with self.conn as conn:
            C = conn.cursor()
            cond = []
            vals = []
            _log.debug("retrieveServiceConfigs() configname=%s configversion=%s system=%s",
                       configname, configversion, system)
            if eventid not in (None, u'', u'*', 0):
                C.execute('select config from event where id=?', (int(eventid), ))
                idx = C.fetchone()['config']
                cond.append('id=?')
                vals.append(idx)
            else:
                if status == u'active':
                    cond.append('next is NULL')
                elif status == u'inactive':
                    cond.append('next is not NULL')
                if configname not in (None, u'', u'*', u'all'):
                    cond.append('name glob ?')
                    vals.append(configname)
                if system not in (None, u'', u'*', u'all'):
                    cond.append('system glob ?')
                    vals.append(system)
            if len(cond) > 0:
                cond = 'where ' + (' and '.join(cond))
            else:
                cond = ''
            _log.debug('retrieveServiceConfigs() w/ %s %s', cond, vals)
            C.execute("""select id, name, created, active, next, desc, system
                         from config %s;
                      """ % cond, vals)
            return self._getConfig(C)

    @rpc(NTTable([
        ('config_prop_id', 'i'),
        ('config_idx', 'i'),
        ('system_key', 's'),
        ('system_val', 's'),
    ]))
    def retrieveServiceConfigProps(self, propname=None, servicename=None, configname=None):
        if servicename not in (None, '*', 'masar') or propname not in (None, '*', 'system'):
            _log.warning("Service names or non 'system' prop names not supported (%s, %s)",
                         servicename, propname)
            return []
        with self.conn as conn:
            C = conn.cursor()
            cond = []
            vals = []
            if configname not in (None, u'', u'*'):
                cond.append('name glob ?')
                vals.append(configname)
            if len(cond) > 0:
                cond = 'where ' + (' and '.join(cond))
            else:
                cond = ''
            _log.debug("retrieveServiceConfigProps() %s %s", cond, vals)
            C.execute('select id, system from config %s' % cond, vals)
            R = []
            for id, system in C.fetchall():
                if system is None:
                    continue
                R.append({
                    'config_prop_id': 1,
                    'config_idx': id,
                    'system_key': 'system',
                    'system_val': system,
                })
            return R

    @rpc(NTTable([
        ('event_id', 'i'),
        ('config_id', 'i'),
        ('comments', 's'),
        ('event_time', 's'),
        ('user_name', 's'),
    ]))
    def retrieveServiceEvents(self, configid=None, start=None, end=None, comment=None,
                              user=None, eventid=None):
        with self.conn as conn:
            C = conn.cursor()
            cond = ['user is not NULL']
            vals = []
            if user not in (None, u'', u'*'):
                cond.append('user glob ?')
                vals.append(user)
            if comment not in (None, u'', u'*'):
                cond.append('comment glob ?')
                vals.append(comment)
            if configid not in (None, u'*'):
                cond.append('config=?')
                vals.append(int(configid))
            # HACK instead of using julianday(), use lexical comparison,
            # which should produce the same result as we always store time
            # as "YYYY-MM-DD HH:MM:SS"
            if start not in (None, u''):
                cond.append("event_time>=?")
                vals.append(normtime(start))
            if end not in (None, u''):
                cond.append("event_time<?")
                vals.append(normtime(end))
            if len(cond) > 0:
                cond = 'where ' + (' and '.join(cond))
            else:
                cond = ''
            _log.debug("retrieveServiceEvents() %s %s", cond, vals)
            C.execute("""select id as event_id, config as config_id,
                         comment as comments, created as event_time, user as user_name
                         from event %s
                         order by config_id, event_time
                      """ % cond, vals)
            return C.fetchall()

    @rpc(multiType)
    def retrieveSnapshot(self, eventid=None, start=None, end=None, comment=None):
        # start, end, and comment ignored
        eventid = int(eventid)
        with self.conn as conn:
            C = conn.cursor()
            _log.debug("retrieveSnapshot() %d", eventid)
            C.execute('select created from event where id=?', (eventid, ))
            S, NS = timestr2tuple(C.fetchone()[0])
            C.execute("""select name, tags, groupName, readonly,
                         dtype, severity, status, time, timens, value
                         from event_pv
                         inner join config_pv on event_pv.pv = config_pv.id
                         where event_pv.event = ?
                      """, (eventid, ))
            L = C.fetchall()
            sevr = list(map(itemgetter('severity'), L))

            def unpack(I):
                try:
                    return decodeValue(I['value'])
                except Exception as e:
                    raise ValueError("Error decoding %s: %s" % (type(I['value']), e))

            return {
                'channelName': list(map(itemgetter('name'), L)),
                'value': list(map(unpack, L)),
                'severity': sevr,
                'isConnected': list(map(lambda S: S <= 3, sevr)),
                'status': list(map(itemgetter('status'), L)),
                'secondsPastEpoch': list(map(itemgetter('time'), L)),
                'nanoseconds': list(map(itemgetter('timens'), L)),
                'dbrType': list(map(itemgetter('dtype'), L)),
                'groupName': list(map(itemgetter('groupName'), L)),
                'readonly': list(map(itemgetter('readonly'), L)),
                'tags': list(map(itemgetter('tags'), L)),
                'timeStamp': {
                    'secondsPastEpoch': S,
                    'nanoseconds': NS,
                    'userTag': eventid
                },
                'userTag': [0] * len(L),
                'message': [u''] * len(L),
            }

    @rpc(multiType)
    def saveSnapshot(self, servicename=None, configname=None, comment=None, user=None, desc=None):
        if servicename not in (None, 'masar'):
            raise RemoteError("Bad servicename")
        with self.conn as conn:
            C = conn.cursor()
            C.execute('select id from config where name=? and next is NULL and active=1',
                      (configname, ))
            cid = C.fetchone()
            if cid is None:
                raise RemoteError("Unknown config '%s'" % configname)
            cid = cid[0]
            _log.debug("saveSnapshot() for '%s'(%s)", configname, cid)
            C.execute('select id, name, tags, groupName, readonly from config_pv where config=?',
                      (cid, ))
            config = C.fetchall()
            pvid = list(map(itemgetter('id'), config))
            names = list(map(itemgetter('name'), config))
            ret = self.gather(names)
            _log.debug("Gather complete")
            C.execute('insert into event(config, user, comment, created) values (?,?,?,?)',
                      (cid, user, desc, self.now()))
            eid = C.lastrowid
            _log.debug("Create event %s", eid)
            C.executemany("""insert into event_pv(event, pv, dtype, severity, status, time, timens, value)
                             values (?, ?, ?, ?, ?, ?, ?, ?);""",
                          izip(
                              repeat(eid, len(names)),
                              pvid,
                              ret['dbrType'].tolist(),
                              ret['severity'].tolist(),
                              ret['status'].tolist(),
                              ret['secondsPastEpoch'].tolist(),
                              ret['nanoseconds'].tolist(),
                              [encodeValue(V) for V in ret['value']],
                          ))
            _log.debug("event %s with %s %s", eid, len(names), C.rowcount)
        return self.retrieveSnapshot(eventid=eid)

    @rpc(NTScalar.buildType('?'))
    def updateSnapshotEvent(self, eventid=None, configname=None, user=None, desc=None):
        eventid = int(eventid)
        if user is None or desc is None:
            raise RemoteError("must provide user name and description")
        with self.conn as conn:
            C = conn.cursor()
            _log.debug("updateSnapshotEvent() update %s with %s '%s'", eventid, user, desc[20:])
            evt = C.execute(
                'select config.name from event inner join config on event.config=config.id where event.id=?',
                (eventid, )).fetchone()
            if evt is None:
                raise RemoteError("No event")
            elif configname is not None and configname != evt[0]:
                raise RemoteError('eventid and configname are inconsistent')
            C.execute('update event set user=?, comment=? where id=? and user is NULL and comment is NULL',
                      (user, desc, eventid))
            _log.debug("changed %s", C.rowcount)
            return {
                'value': C.rowcount == 1,
            }

    @rpc(multiType)
    def getCurrentValue(self, names=None):
        return self.gather(list(names))

    @rpc(multiType)
    def getLiveMachine(self, **kws):
        return self.gather(list(kws.values()))

    # for troubleshooting
    @rpc(NTTable.buildType([
        ('config_idx', 'ai'),
        ('config_name', 'as'),
        ('config_desc', 'as'),
        ('config_create_date', 'as'),
        ('config_version', 'as'),
        ('status', 'as'),
    ]))
    def storeTestConfig(self, **kws):
        conf = Value(
            NTTable.buildType([
                ('channelName', 'as'),
                ('readonly', 'a?'),
                ('groupName', 'as'),
                ('tags', 'as'),
            ]), {
                'labels': ['channelName', 'readonly', 'groupName', 'tags'],
                'value': {
                    'channelName': ['pv:f64:1', 'pv:i32:2', 'pv:str:3', 'pv:bad:4'],
                    'readonly': [False, False, False, False],
                    'groupName': ['A', '', '', ''],
                    'tags': ['', 'a, b', '', ''],
                },
            })
        return self.storeServiceConfig(config=conf, **kws)

    @rpc(NTScalar.buildType('s'))
    def dumpDB(self):
        return {'value': '\n'.join(self.conn.iterdump())}

    @rpc(NTTable.buildType([
        ('config_idx', 'i'),
        ('config_name', 's'),
        ('config_desc', 's'),
        ('config_create_date', 's'),
        ('config_version', 's'),
        ('status', 's'),
    ]))
    def storeServiceConfigManual(self, pvs=None, ros=None, groups=None, tags=None, **kws):
        if pvs is None:
            raise RemoteError("Missing required pvs=")
        pvs = pvs.split(u',')
        N = len(pvs)
        if N == 0:
            raise RemoteError("No PVs")
        _log.debug("Load config %s", pvs)

        def mangle(L, P):
            L = (L or u'').split(u',')
            if len(L) < N:
                L = L + [P] * (N - len(L))
            return L

        ros, groups, tags = mangle(ros, False), mangle(groups, u''), mangle(tags, u'')
        config = Value(
            configType.type, {
                'labels': configType.labels,
                'value': {
                    'channelName': pvs,
                    'readonly': numpy.asarray(ros, dtype=numpy.bool),
                    'groupName': groups,
                    'tags': tags,
                },
            })
        _log.info("Load config %s", config.tolist())
        return self.storeServiceConfig(config=config, **kws)
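# A minimal sketch (not part of the original module) of exercising the Service
# in-process, mirroring how the unit tests above drive it; the 'gather' callable
# and the sqlite 'connect' helper are assumed to come from the same package.
#
#     S = Service(conn=connect(':memory:'), gather=my_gather, sim=True)
#     R = S.storeServiceConfigManual(pvs="pv:f64:1,pv:i32:2", configname="demo", desc="demo config")
#     print(R.value.config_idx)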
from p4p import Type, Value
from p4p.nt import NTTable
from p4p.rpc import rpc, quickRPCServer

name_table = NTTable([("name", "s")])


class DirectoryService(object):
    @rpc(name_table)
    def ds(self, *args, **kws):
        responses = ["This", "is", "a", "test"]
        response_object = [{"name": s} for s in responses]
        return name_table.wrap(response_object)


print("Starting Directory Service Test Server!")
tester = DirectoryService()
quickRPCServer(provider="Directory Service Test Server", prefix="", target=tester)
"""
srv = pvaccess.RpcServer()
def handle_request(request):
    response = pvaccess.PvObject({"labels": [pvaccess.STRING], "value": {"name": [pvaccess.STRING]}},
                                 {"labels": ["name"], "value": {"name": ['This', 'is', 'a', 'test']}},
                                 'epics:nt/NTTable:1.0')
    return response
"""
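# A minimal sketch of what a caller would see after unwrapping the reply from
# the 'ds' RPC above (NTTable.unwrap is the same helper used by the client
# functions earlier in this collection); 'reply' is a placeholder for the
# wrapped table returned by the call.
#
#     [row['name'] for row in NTTable.unwrap(reply)]  # -> ['This', 'is', 'a', 'test']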
from p4p.nt import NTScalar, NTMultiChannel, NTTable
from p4p.wrapper import Value

from .db import encodeValue, decodeValue

multiType = NTMultiChannel.buildType('av',
                                     extra=[
                                         ('dbrType', 'ai'),
                                         ('readonly', 'a?'),
                                         ('groupName', 'as'),
                                         ('tags', 'as'),
                                     ])

configType = NTTable([
    ('channelName', 's'),
    ('readonly', '?'),
    ('groupName', 's'),
    ('tags', 's'),
])

configTable = NTTable([
    ('config_idx', 'i'),
    ('config_name', 's'),
    ('config_desc', 's'),
    ('config_create_date', 's'),
    ('config_version', 's'),
    ('status', 's'),
    ('system', 's'),
])


def normtime(tstr):
twiss_data = None
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'twiss_data.pkl'), 'rb') as f:
    twiss_data = pickle.load(f)

if rmat_data is None or twiss_data is None:
    raise Exception("Could not load saved rmat or twiss data.")

twiss_cols = [('ORDINAL', 'd'), ('ELEMENT_NAME', 's'), ('EPICS_CHANNEL_ACCESS_NAME', 's'),
              ('POSITION_INDEX', 's'), ('LEFF', 'd'), ('TOTAL_ENERGY', 'd'),
              ('PSI_X', 'd'), ('BETA_X', 'd'), ('ALPHA_X', 'd'), ('ETA_X', 'd'), ('ETAP_X', 'd'),
              ('PSI_Y', 'd'), ('BETA_Y', 'd'), ('ALPHA_Y', 'd'), ('ETA_Y', 'd'), ('ETAP_Y', 'd')]
twiss_table = NTTable(twiss_cols)
rows = [{key: twiss_data['value'][key][i] for key, _ in twiss_cols}
        for i in range(0, len(twiss_data['value']['ELEMENT_NAME']))]
twiss_vals = twiss_table.wrap(rows)
twiss_pv = SharedPV(nt=twiss_table, initial=twiss_vals)

rmat_cols = [('ORDINAL', 'd'), ('ELEMENT_NAME', 's'), ('EPICS_CHANNEL_ACCESS_NAME', 's'),
             ('POSITION_INDEX', 's'), ('Z_POSITION', 'd'),
             ('R11', 'd'), ('R12', 'd'), ('R13', 'd'), ('R14', 'd'), ('R15', 'd'), ('R16', 'd'),
             ('R21', 'd'), ('R22', 'd'), ('R23', 'd'), ('R24', 'd'), ('R25', 'd'), ('R26', 'd'),
             ('R31', 'd'), ('R32', 'd'), ('R33', 'd'), ('R34', 'd'), ('R35', 'd'), ('R36', 'd'),
             ('R41', 'd'), ('R42', 'd'), ('R43', 'd'), ('R44', 'd'), ('R45', 'd'), ('R46', 'd'),
             ('R51', 'd'), ('R52', 'd'), ('R53', 'd'),