def __init__(self, db, rt_db, verbose, print_all):
    """Open the MPS configuration and runtime databases and build helpers.

    db        -- file name of the MPS configuration database
    rt_db     -- file name of the runtime (thresholds/bypass) database
    verbose   -- enable progress output in the checker
    print_all -- flag stored on the instance for the caller's use
    """
    # BUG FIX: the original ignored the db/rt_db parameters and read the
    # global argparse result (args.database[0].name), which only worked
    # when a specific script's globals happened to be in scope.
    self.mps = MPSConfig(db, rt_db)
    self.session = self.mps.session
    self.rt_session = self.mps.runtime_session
    self.mps_names = MpsName(self.session)
    self.verbose = verbose
    self.print_all = print_all
    self.rt = RuntimeChecker(self.session, self.rt_session, self.verbose)
def __init__(self, db, rt_db, verbose, force_write, no_check=False):
    """Open the MPS configuration and runtime databases for writing.

    db          -- file name of the MPS configuration database
    rt_db       -- file name of the runtime (thresholds/bypass) database
    verbose     -- enable progress output
    force_write -- write values even when checks would normally block it
    no_check    -- skip the database consistency check when True
    """
    # BUG FIX: the original ignored the db/rt_db parameters and read the
    # global argparse result (args.database[0].name) instead.
    self.mps = MPSConfig(db, rt_db)
    self.session = self.mps.session
    self.rt_session = self.mps.runtime_session
    self.mps_names = MpsName(self.session)
    # Checker runs quietly here; this class reports its own progress.
    self.rt = RuntimeChecker(self.session, self.rt_session, False)
    self.force_write = force_write
    self.verbose = verbose
    self.no_check = no_check
def __init__(self, db_file, template_path, dest_path, app_id, verbose):
    """Cache generator settings, then load alarm data from the database.

    db_file       -- MPS configuration database file name
    template_path -- directory containing the alarm template files
    dest_path     -- directory where generated files are written
    app_id        -- optional application id (kept for callers)
    verbose       -- enable progress output
    """
    self.app_id = app_id
    self.verbose = verbose
    self.template_path = template_path
    self.dest_path = dest_path
    self.alarm_info = {}
    # Machine areas; presumably listed in beamline order ('global2' first).
    self.areas = [
        'global2', 'GUNB', 'L0B', 'HTR', 'L1B', 'BC1B', 'L2B', 'BC2B',
        'L3B', 'EXT', 'DOG', 'BYP', 'SLTH', 'SLTS', 'BSYH', 'BSYS',
        'LTUH', 'LTUS', 'UNDH', 'UNDS', 'DMPH', 'DMPS', 'FEEH', 'FEES',
    ]
    # Intentionally an alias of self.areas, not a copy.
    self.areas_in_order = self.areas
    # Read everything we need while the DB session is open.
    with MpsDbReader(db_file) as mps_db_session:
        self.mps_name = MpsName(mps_db_session)
        self.__extract_alarms(mps_db_session)
def __init__(self, db_file='', app_id=None, verbose=False):
    """Load application information from the MPS configuration database.

    db_file -- MPS configuration database file name
    app_id  -- optional application id filter (kept for callers)
    verbose -- enable progress output
    """
    self.app_id = app_id
    self.verbose = verbose
    # This is the list of all applications
    self.analog_apps = []
    self.digital_apps = []
    # List of Link Nodes by cpu_name + slot - track if it has only
    # digital, analog or both apps
    self.link_nodes = {}
    # BUG FIX: str.lstrip/rstrip remove *character sets*, not literal
    # prefixes/suffixes, so e.g. "mps_config-cu.db" lost leading 'c'/'u'
    # characters and version strings ending in 'b'/'d' were truncated.
    base = os.path.basename(db_file)
    prefix = 'mps_config-'
    suffix = '.db'
    if base.startswith(prefix):
        base = base[len(prefix):]
    if base.endswith(suffix):
        base = base[:-len(suffix)]
    self.config_version = base
    # Open a session to the MPS database
    with MpsDbReader(db_file) as mps_db_session:
        # Extract the application information
        self.__extract_apps(mps_db_session)
        self.mps_name = MpsName(mps_db_session)
# Command-line flags (the parser itself is created earlier in the script).
parser.add_argument('-v', action='store_true', default=False, dest='verbose',
                    help='Verbose output')
args = parser.parse_args()

verbose = args.verbose

# Load application data and open a direct session to the config database.
mps_app_reader = MpsAppReader(db_file=args.database[0].name, verbose=args.verbose)
mps = MPSConfig(args.database[0].name)
session = mps.session
mps_name = MpsName(session)

link_nodes = None
link_node = None
if (args.link_nodes):
    # Build one screen directory per link node.
    link_nodes = session.query(models.LinkNode).all()
    create_link_node_directories(args.link_nodes, link_nodes, mps_name)
if (args.link_node):
    # Restrict generation to a single SIOC.
    # NOTE(review): link_nodes is still None here unless the link_nodes
    # option was also given; filter() over None would raise a TypeError.
    # Confirm the CLI requires both options together.
    link_node = args.link_node
    if (len(filter(lambda x: x.get_name() == link_node, link_nodes)) != 1):
        print 'ERROR: Can\'t find sioc named {0}'.format(link_node)
        exit(0)
    else:
        link_nodes = filter(lambda x: x.get_name() == link_node, link_nodes)
        print 'INFO: Producing screens for SIOC {0} only'.format(link_node)
def export(session, file, node):
    """Write a Graphviz 'digraph' mapping devices/cards/SIOCs for one
    link node (or all of them when node == 'All') to the open file.

    session -- SQLAlchemy session for the MPS configuration database
    file    -- writable file object; closed by this function on exit
    node    -- link node name to draw, or 'All'
    """
    file.write("digraph {\n")
    file.write(' label="Map of inputs to Link Node {0}"'.format(node))
    file.write(" node [fontname=\"sansserif\", fontsize=12]\n")
    # write link nodes: one box per link node, colored round-robin
    link_nodes = session.query(models.LinkNode).order_by(
        models.LinkNode.id.asc()).all()
    ln = {}        # link node id -> LinkNode row
    ln_color = {}  # link node id -> graphviz color
    ci = 0
    for l in link_nodes:
        ln[l.id] = l
        ln_color[l.id] = colors[ci]
        ci = (ci + 1) % len(colors)
        if (node == l.get_name()):
            ln_info = '{0}\\n{1}\\n{2} (id={3})\\n'.format(
                l.get_name(), l.cpu, l.crate.get_name(), l.crate.crate_id)
            # Slots occupied by other link nodes in this crate.
            slots = []
            for n in l.crate.link_nodes:
                slots.append(n.slot_number)
            for c in l.crate.cards:
                card_type = session.query(models.ApplicationType).\
                    filter(models.ApplicationType.id==c.type_id).one()
                # A slot-2 link node presumably owns every card not claimed
                # by another link node -- TODO confirm this convention.
                if (c.slot_number == l.slot_number or
                        (l.slot_number == 2 and not c.slot_number in slots)):
                    ln_info = '{0}\\n{1} (slot {2})'.format(
                        ln_info, card_type.name, c.slot_number)
            file.write(' "{0}" [shape=box3d, color={1}, label="{2}"]\n'.\
                format(l.get_name(), ln_color[l.id], ln_info))
    # write link node cards
    cards = session.query(models.ApplicationCard).order_by(
        models.ApplicationCard.crate_id.asc()).all()
    cards = select_link_node_cards(session, cards, node)
    if (len(cards) == 0):
        print("WARN: No cards for link node {}".format(node))
        return
    card_ids = []
    for c in cards:
        card_ids.append(c.id)
    for card in cards:
        crate = session.query(
            models.Crate).filter(models.Crate.id == card.crate_id).one()
        card_type = session.query(models.ApplicationType).\
            filter(models.ApplicationType.id==card.type_id).one()
        nodes = []
        for n in crate.link_nodes:
            nodes.append(n.get_name())
        if (node in nodes or node == 'All'):
            # Label the card box with its channels (analog or digital).
            channels = ''
            if (len(card.analog_channels) > 0):
                for ac in card.analog_channels:
                    channels = channels + "{0} (ch {1})\\n".format(
                        ac.analog_device.name, ac.number)
            elif (len(card.digital_channels) > 0):
                for dc in card.digital_channels:
                    device = session.query(models.DigitalDevice).\
                        filter(models.DigitalDevice.id==dc.device_input.digital_device_id).one()
                    channels = channels + "{0} (ch {1})\\n".format(
                        device.name, dc.number)
            file.write(' "{0}" [shape=box3d, color={1}, label="{2}"]\n'.\
                format('c{0}'.format(card.id),
                       ln_color[crate.link_nodes[0].id],
                       '{0} (slot {1})\\n\\n{2}'.format(
                           card_type.name, card.slot_number, channels)))
    devices = session.query(models.Device).order_by(
        models.Device.z_location.asc()).all()
    devices = select_link_node_devices(session, devices, node)
    # write linked list of devices (based on z-location)
    file.write(' {rank=same;')
    for d in devices:
        if (d.card_id != None):
            try:
                card = session.query(models.ApplicationCard).filter(
                    models.ApplicationCard.id == d.card_id).one()
                crate = session.query(models.Crate).filter(
                    models.Crate.id == card.crate_id).one()
                # if (node == crate.link_node.get_name() or node == 'All'):
                if (d.card_id in card_ids):
                    file.write('"{0}"->'.format(d.name))
            except:
                print 'ERROR: Failed to find card for device {0} {1}'.format(
                    d.name, d.card_id)
    file.write('END}\n')
    # write device information
    mpsName = MpsName(session)
    for d in devices:
        printDevices(session, file, d, node, ln_color, mpsName)
    # device->card edges
    for d in devices:
        if (d.card_id != None):
            card = session.query(models.ApplicationCard).filter(
                models.ApplicationCard.id == d.card_id).one()
            crate = session.query(
                models.Crate).filter(models.Crate.id == card.crate_id).one()
            color = ln_color[crate.link_nodes[0].id]
            label = d.name  # NOTE(review): unused; confirm it can be removed
            nodes = []
            for n in crate.link_nodes:
                nodes.append(n.get_name())
            if (node in nodes or node == 'All'):
                if d.discriminator == 'digital_device':
                    # Comma-separated list of digital channel numbers.
                    channel = ''
                    for di in d.inputs:
                        try:
                            ch = session.query(models.DigitalChannel).\
                                filter(models.DigitalChannel.id==di.channel_id).one()
                            channel = channel + str(ch.number)
                            if len(d.inputs) > 1:
                                channel = channel + ","
                        except:
                            print 'ERROR: Failed to find analog channel for device (name={0}, channel_id={1}'.\
                                format(d.name, d.channel_id)
                    if len(d.inputs) > 1:
                        channel = channel[:-1]  # drop trailing comma
                elif (d.discriminator == 'analog_device'):
                    try:
                        ch = session.query(models.AnalogChannel).filter(
                            models.AnalogChannel.id == d.channel_id).one()
                    except:
                        print 'ERROR: Failed to find analog channel for device (name={0}, channel_id={1}'.\
                            format(d.name, d.channel_id)
                    else:
                        channel = ch.number
                else:
                    channel = 0
                file.write('edge [dir=none, color={0}]\n'.format(color))
                file.write('"{0}"->"c{1}" [label="ch {2}"]\n'.format(
                    d.name, card.id, channel))
    # card->sioc edges
    # NOTE(review): 'crate' and 'd' below are leftovers from the loops
    # above -- confirm this section really belongs at function level.
    nodes = []
    for n in crate.link_nodes:
        nodes.append(n.get_name())
    for c in cards:
        crate = session.query(
            models.Crate).filter(models.Crate.id == c.crate_id).one()
        color = ln_color[crate.link_nodes[0].id]
        label = d.name
        # NOTE(review): 'node is nodes' compares identity against a list,
        # which is always False -- likely intended 'node in nodes'.
        # Also 'crate.link_node' (singular) differs from 'crate.link_nodes'
        # used elsewhere; confirm the attribute name.
        if (node is nodes or node == 'All'):
            file.write('edge [dir=none, color={0}]\n'.format(color))
            file.write('"c{0}"->"{1}" [label="s {2}"]\n'.\
                format(c.id, ln[crate.link_node.id].get_name(), c.slot_number))
    file.write("}\n")
    file.close()
def __init__(self, session, rt_session, verbose):
    """Keep references to the two database sessions and a PV-name helper.

    session    -- SQLAlchemy session for the MPS configuration database
    rt_session -- session for the runtime (thresholds/bypass) database
    verbose    -- when True, check methods print progress information
    """
    self.verbose = verbose
    self.session = session
    self.rt_session = rt_session
    self.mps_names = MpsName(self.session)
def check_databases(self):
    """Cross-check the configuration and runtime databases.

    Verifies that both databases contain the same devices (ids and
    names) and the same device inputs (digital channels).

    Returns True when everything matches, False on the first mismatch.
    """
    self.mps_name = MpsName(self.session)
    # First check the devices in both databases
    devices = self.session.query(models.Device).all()
    rt_devices = self.rt_session.query(runtime.Device).all()
    if (self.verbose):
        sys.stdout.write('Checking number of devices in both databases...')
    if (len(devices) != len(rt_devices)):
        print('')
        print('ERROR: Number of devices in databases must be the same')
        print(' found {0} devices in config database'.format(len(devices)))
        print(' found {0} devices in runtime database'.format(len(rt_devices)))
        return False
    if (self.verbose):
        print(' done. Found {} devices.'.format(len(devices)))
    # Extract [id, name] pairs sorted by id so zip() compares matching rows.
    d = sorted([[i.id, i.name] for i in devices], key=lambda v: v[0])
    rt_d = sorted([[i.mpsdb_id, i.mpsdb_name] for i in rt_devices],
                  key=lambda v: v[0])
    # Compare one by one the devices, they must be exactly the same
    if (self.verbose):
        sys.stdout.write('Checking device names and ids in both databases...')
    for a, b in zip(d, rt_d):
        if (a[0] != b[0] or a[1] != b[1]):
            print('')
            print('ERROR: Mismatched devices found')
            # BUG FIX: name and id were swapped in the original output
            # (a[0] is the id, a[1] is the name).
            print(' {0} [id={1}] in config database'.format(a[1], a[0]))
            print(' {0} [id={1}] in runtime database'.format(b[1], b[0]))
            return False
    if (self.verbose):
        print(' done.')
    # Now check the device_inputs (i.e. digital devices)
    device_inputs = self.session.query(models.DeviceInput).all()
    rt_device_inputs = self.rt_session.query(runtime.DeviceInput).all()
    di = [[i.id, self.mps_name.getDeviceInputName(i)] for i in device_inputs]
    # BUG FIX: the original called d.sort() here, sorting the already
    # sorted device list and leaving di unsorted against sorted rt_di.
    di.sort()
    rt_di = [[i.mpsdb_id, i.pv_name] for i in rt_device_inputs]
    rt_di.sort()
    # Compare one by one the device inputs, they must be exactly the same
    if (self.verbose):
        sys.stdout.write('Checking device inputs (digital channels) names and ids in both databases...')
    for a, b in zip(di, rt_di):
        if (a[0] != b[0] or a[1] != b[1]):
            print('ERROR: Mismatched devices found')
            print(' {0} [id={1}] in config database'.format(a[1], a[0]))
            print(' {0} [id={1}] in runtime database'.format(b[1], b[0]))
            return False
    if (self.verbose):
        print(' done.')
    # BUG FIX: previously fell off the end returning None (falsy) even
    # when all checks passed.
    return True
class RuntimeChecker:
    """Consistency checks and maintenance for the MPS runtime database.

    Compares the configuration database against the runtime
    (thresholds/bypass) database, compares runtime threshold values
    against the live threshold PVs served by the SIOCs, and can create
    a fresh runtime database from the configuration.
    """
    # All threshold tables of the runtime database, in PV order.
    threshold_tables = ['threshold0', 'threshold1', 'threshold2', 'threshold3',
                        'threshold4', 'threshold5', 'threshold6', 'threshold7',
                        'threshold_alt0', 'threshold_alt1', 'threshold_alt2',
                        'threshold_alt3', 'threshold_alt4', 'threshold_alt5',
                        'threshold_alt6', 'threshold_alt7',
                        'threshold_lc1', 'threshold_idl']
    std_threshold_tables = ['threshold0', 'threshold1', 'threshold2', 'threshold3',
                            'threshold4', 'threshold5', 'threshold6', 'threshold7']
    alt_threshold_tables = ['threshold_alt0', 'threshold_alt1', 'threshold_alt2',
                            'threshold_alt3', 'threshold_alt4', 'threshold_alt5',
                            'threshold_alt6', 'threshold_alt7']
    lc1_tables = ['threshold_lc1']
    idl_tables = ['threshold_idl']
    # PV name fragment for each entry of threshold_tables (parallel list).
    threshold_tables_pv = ['lc2', 'lc2', 'lc2', 'lc2', 'lc2', 'lc2', 'lc2', 'lc2',
                           'alt', 'alt', 'alt', 'alt', 'alt', 'alt', 'alt', 'alt',
                           'lc1', 'idl']
    threshold_types = ['l', 'h']
    integrators = ['i0', 'i1', 'i2', 'i3']
    # Threshold slot name for each entry of threshold_tables (parallel list).
    threshold_index = ['t0', 't1', 't2', 't3', 't4', 't5', 't6', 't7',
                       't0', 't1', 't2', 't3', 't4', 't5', 't6', 't7',
                       't0', 't0']

    def __init__(self, session, rt_session, verbose):
        """Keep the two DB sessions and build the PV-name helper."""
        self.session = session
        self.rt_session = rt_session
        self.verbose = verbose
        self.mps_names = MpsName(session)

    def get_device_input_id_from_pv(self, pv_name):
        """Stub: PV -> device input lookup is not implemented yet."""
        return False

    def get_device_id_from_name(self, name):
        """Return the config-database id of the named device, or None."""
        try:
            d = self.session.query(models.Device).filter(
                models.Device.name == name).one()
            return d.id
        except:
            # one() raises when the device is missing or duplicated.
            print('ERROR: Cannot find device "{0}"'.format(name))
            return None

    def get_thresholds(self, device, active_only=True):
        """
        Return a list of all possible thresholds for the specified device,
        including the value/active from the database. This is the format:
        [{
          'pv': pyepics_pv for the threshold PV
          'pv_enable': pyepics_pv for the threshold enable PV
          'db_table': threshold table name in the runtime database
          'integrator': from the array self.integrators
          'threshold_type': from the array self.threshold_types ('l' or 'h')
          'active': True or False
          'value': threshold value from the database
        }, ...]

        Returns None on lookup failures.
        """
        threshold_list = []
        try:
            rt_d = self.rt_session.query(runtime.Device).\
                filter(runtime.Device.mpsdb_id == device.id).one()
        except:
            # BUG FIX: this message referenced the undefined name
            # 'device_id' and raised NameError instead of reporting.
            print('ERROR: Failed to find device id {} in runtime database'.format(device.id))
            return None
        is_bpm = (device.device_type.name == 'BPMS')
        for t_index, t_table in enumerate(self.threshold_tables):
            for integrator in self.integrators:
                if (integrator == 'i3' and is_bpm):
                    # i3 is not valid for BPMs; only i0, i1 and i2
                    # (x, y and tmit)
                    continue
                for t_type in self.threshold_types:
                    threshold_item = {}
                    pv_name = self.mps_names.getThresholdPv(
                        self.mps_names.getAnalogDeviceNameFromId(device.id),
                        self.threshold_tables_pv[t_index],
                        self.threshold_index[t_index],
                        integrator, t_type, is_bpm)
                    if (pv_name == None):
                        print('ERROR: Failed to find threshold PV name for device \'{}\' [threshold={}, integrator={}, is_bpm={}]'.\
                            format(device.name, self.threshold_tables_pv[t_index],
                                   integrator, is_bpm))
                        return None
                    pv_name_enable = pv_name + '_EN'
                    # Empty pv_name is silently skipped (original behavior).
                    if (pv_name):
                        threshold_item['db_table'] = t_table
                        threshold_item['integrator'] = integrator
                        threshold_item['threshold_type'] = t_type
                        threshold_item['active'] = bool(getattr(
                            getattr(rt_d, t_table),
                            '{0}_{1}_active'.format(integrator, t_type)))
                        threshold_item['value'] = float(getattr(
                            getattr(rt_d, t_table),
                            '{0}_{1}'.format(integrator, t_type)))
                        # Only connect PVs we will actually read.
                        if (active_only and not threshold_item['active']):
                            threshold_item['pv'] = None
                            threshold_item['pv_enable'] = None
                        else:
                            threshold_item['pv'] = PV(pv_name)
                            threshold_item['pv_enable'] = PV(pv_name_enable)
                        threshold_list.append(threshold_item)
        return threshold_list

    def check_device_thresholds(self, device):
        """Compare active runtime thresholds with the live PV values.

        Returns True when every active threshold PV is reachable,
        readable and equal to the database value; False otherwise.
        """
        threshold_list = self.get_thresholds(device)
        # Robustness fix: get_thresholds returns None on lookup failure.
        if (threshold_list == None):
            return False
        invalid_pvs = False
        invalid_pv_names = ''
        read_pv_error = False
        read_pv_names = ''
        mismatch_value = False
        mismatch_pv_names = ''
        for threshold_item in threshold_list:
            if (threshold_item['active']):
                if (threshold_item['pv'].host == None):
                    invalid_pvs = True
                    # BUG FIX: accumulated into the undefined name
                    # 'bad_pv_names' (NameError).
                    invalid_pv_names = '{} * {}={}\n'.format(
                        invalid_pv_names, threshold_item['pv'].pvname,
                        threshold_item['value'])
                else:
                    try:
                        pv_value = threshold_item['pv'].get()
                    except epics.ca.CASeverityException:
                        read_pv_error = True
                        read_pv_names = '{} * {}\n'.format(
                            read_pv_names, threshold_item['pv'].pvname)
                        continue
                    if (pv_value != threshold_item['value']):
                        mismatch_value = True
                        mismatch_pv_names = '{} * {} (PV={}, DB={})\n'.\
                            format(mismatch_pv_names,
                                   threshold_item['pv'].pvname,
                                   pv_value, threshold_item['value'])
        if (invalid_pvs or read_pv_error or mismatch_value):
            if (invalid_pvs):
                print('ERROR: Cannot reach these PVs:')
                print(invalid_pv_names)
            if (read_pv_error):
                print('ERROR: Cannot read these PVs:')
                print(read_pv_names)
            if (mismatch_value):
                print('ERROR: Threshold values are different for these PVs:')
                print(mismatch_pv_names)
            return False
        return True

    def check_app_thresholds(self, app_id):
        """
        Check whether the runtime database thresholds are the same as
        the values set in the SIOCs for every analog channel of the
        application identified by its global id.
        """
        try:
            app = self.session.query(models.ApplicationCard).filter(
                models.ApplicationCard.global_id == app_id).one()
        except:
            print('ERROR: Cannot find application with global id {}.'.format(app_id))
            return False
        ok = True
        for c in app.analog_channels:
            [device, rt_device] = self.check_device(c.analog_device.id)
            if (device == None):
                print('ERROR: Cannot check device')
                return False
            # BUG FIX: the result was previously discarded, so this
            # method reported success even when thresholds mismatched.
            if (not self.check_device_thresholds(device)):
                ok = False
        return ok

    def check_device(self, device_id):
        """
        Verify if device_id is mapped on both databases.
        Returns [None, None] if there is a mismatch, and a pair
        [device, rt_device] if device_id is valid on both.
        """
        try:
            device = self.session.query(models.Device).\
                filter(models.Device.id == device_id).one()
        except:
            print('ERROR: Failed to find device id {} in database'.format(device_id))
            return [None, None]
        try:
            rt_device = self.rt_session.query(runtime.Device).\
                filter(runtime.Device.mpsdb_id == device_id).one()
        except:
            print('ERROR: Failed to find device id {} in runtime database'.format(device_id))
            return [None, None]
        if (rt_device.mpsdb_name != device.name):
            print('ERROR: Device names are different in MPS database and runtime database:')
            print(' * MPS Database name: {}'.format(device.name))
            print(' * RT Database name: {}'.format(rt_device.mpsdb_name))
            return [None, None]
        return [device, rt_device]

    def check_device_input(self, device_input_id):
        """
        Verify if device_input_id is mapped on both databases.
        Returns [None, None] on a mismatch, and a pair
        [device_input, rt_device_input] if valid on both.
        """
        try:
            device_input = self.session.query(models.DeviceInput).\
                filter(models.DeviceInput.id == device_input_id).one()
        except Exception as ex:
            print(ex)
            print('ERROR: Failed to find device_input id {} in database'.format(device_input_id))
            return [None, None]
        try:
            rt_device_input = self.rt_session.query(runtime.DeviceInput).\
                filter(runtime.DeviceInput.mpsdb_id == device_input_id).one()
        except Exception as ex:
            print(ex)
            print('ERROR: Failed to find device_input id {} in runtime database'.format(device_input_id))
            return [None, None]
        return [device_input, rt_device_input]

    def check_databases(self):
        """Cross-check devices and device inputs in both databases.

        Returns True when both databases agree, False on any mismatch.
        """
        self.mps_name = MpsName(self.session)
        # First check the devices in both databases
        devices = self.session.query(models.Device).all()
        rt_devices = self.rt_session.query(runtime.Device).all()
        if (self.verbose):
            sys.stdout.write('Checking number of devices in both databases...')
        if (len(devices) != len(rt_devices)):
            print('')
            print('ERROR: Number of devices in databases must be the same')
            print(' found {0} devices in config database'.format(len(devices)))
            print(' found {0} devices in runtime database'.format(len(rt_devices)))
            return False
        if (self.verbose):
            print(' done. Found {} devices.'.format(len(devices)))
        # Extract [id, name] pairs sorted by id so zip() compares rows.
        d = sorted([[i.id, i.name] for i in devices], key=lambda v: v[0])
        rt_d = sorted([[i.mpsdb_id, i.mpsdb_name] for i in rt_devices],
                      key=lambda v: v[0])
        if (self.verbose):
            sys.stdout.write('Checking device names and ids in both databases...')
        for a, b in zip(d, rt_d):
            if (a[0] != b[0] or a[1] != b[1]):
                print('')
                print('ERROR: Mismatched devices found')
                # BUG FIX: name and id were swapped in the output.
                print(' {0} [id={1}] in config database'.format(a[1], a[0]))
                print(' {0} [id={1}] in runtime database'.format(b[1], b[0]))
                return False
        if (self.verbose):
            print(' done.')
        # Now check the device_inputs (i.e. digital devices)
        device_inputs = self.session.query(models.DeviceInput).all()
        rt_device_inputs = self.rt_session.query(runtime.DeviceInput).all()
        di = [[i.id, self.mps_name.getDeviceInputName(i)] for i in device_inputs]
        # BUG FIX: the original sorted 'd' here, leaving 'di' unsorted.
        di.sort()
        rt_di = [[i.mpsdb_id, i.pv_name] for i in rt_device_inputs]
        rt_di.sort()
        if (self.verbose):
            sys.stdout.write('Checking device inputs (digital channels) names and ids in both databases...')
        for a, b in zip(di, rt_di):
            if (a[0] != b[0] or a[1] != b[1]):
                print('ERROR: Mismatched devices found')
                print(' {0} [id={1}] in config database'.format(a[1], a[0]))
                print(' {0} [id={1}] in runtime database'.format(b[1], b[0]))
                return False
        if (self.verbose):
            print(' done.')
        # BUG FIX: previously returned None (falsy) on success.
        return True

    def add_device_input_bypass(self, device_input, rt_device_input):
        """Add a (disabled) bypass row for a digital device input."""
        pv_name = self.mps_names.getDeviceInputName(device_input)
        bypass = runtime.Bypass(device_input=rt_device_input,
                                startdate=int(time.time()),
                                duration=0, pv_name=pv_name)
        self.rt_session.add(bypass)

    def add_analog_bypass(self, device, rt_device):
        """Add one (disabled) bypass row per integrator of an analog device.

        Returns None when the device has no fault inputs or the lookup
        fails; otherwise adds up to four rows to the runtime session.
        """
        # Get the fault inputs that use the analog device
        try:
            fault_inputs = self.session.query(models.FaultInput).filter(
                models.FaultInput.device_id == device.id).all()
        except:
            print('ERROR: Failed find fault inputs for device id {} in database'.format(device.id))
            return None
        # From the fault inputs find which integrators are being used
        fa_names = ['', '', '', '']
        for fi in fault_inputs:
            faults = self.session.query(models.Fault).filter(
                models.Fault.id == fi.fault_id).all()
            for fa in faults:
                fa_names[fa.get_integrator_index()] = fa.name
        if (len(fault_inputs) == 0):
            return None
        for i in range(4):
            pv_name = self.mps_names.getAnalogDeviceName(device)
            if (fa_names[i] == ''):
                # Unused integrator: bypass row carries no PV name.
                pv_name = ''
            else:
                pv_name = pv_name + ':' + fa_names[i]
            bypass = runtime.Bypass(device_id=device.id,
                                    startdate=int(time.time()),
                                    duration=0, device_integrator=i,
                                    pv_name=pv_name)
            self.rt_session.add(bypass)

    def add_runtime_thresholds(self, device):
        """Create one empty row per threshold table for a runtime device."""
        # Same tables/order as the original hand-unrolled version.
        table_classes = [
            runtime.Threshold0, runtime.Threshold1, runtime.Threshold2,
            runtime.Threshold3, runtime.Threshold4, runtime.Threshold5,
            runtime.Threshold6, runtime.Threshold7,
            runtime.ThresholdAlt0, runtime.ThresholdAlt1, runtime.ThresholdAlt2,
            runtime.ThresholdAlt3, runtime.ThresholdAlt4, runtime.ThresholdAlt5,
            runtime.ThresholdAlt6, runtime.ThresholdAlt7,
            runtime.ThresholdLc1, runtime.ThresholdIdl,
        ]
        for table_class in table_classes:
            self.rt_session.add(table_class(device=device, device_id=device.id))

    def create_runtime_database(self):
        """Populate a fresh runtime database from the config database."""
        print('Creating thresholds/bypass database')
        devices = self.session.query(models.Device).all()
        for d in devices:
            rt_d = runtime.Device(mpsdb_id=d.id, mpsdb_name=d.name)
            self.rt_session.add(rt_d)
            # Commit per device so rt_d gets its id before thresholds
            # reference it (original behavior preserved).
            self.rt_session.commit()
            # Add thresholds - if device is analog
            analog_devices = self.session.query(models.AnalogDevice).filter(
                models.AnalogDevice.id == d.id).all()
            if (len(analog_devices) == 1):
                self.add_runtime_thresholds(rt_d)
                self.add_analog_bypass(d, rt_d)
        device_inputs = self.session.query(models.DeviceInput).all()
        for di in device_inputs:
            di_pv = self.mps_names.getDeviceInputNameFromId(di.id)
            rt_di = runtime.DeviceInput(mpsdb_id=di.id,
                                        device_id=di.digital_device.id,
                                        pv_name=di_pv)
            self.rt_session.add(rt_di)
            self.add_device_input_bypass(di, rt_di)
        self.rt_session.commit()
class MpsAlarmReader:
    """
    This class extracts all the necessary information of each application
    defined in the MPS database, necessary to generate the alarm files.
    """

    def __init__(self, db_file, template_path, dest_path, app_id, verbose):
        """Cache generator settings and load alarm data from the database.

        db_file       -- MPS configuration database file name
        template_path -- directory holding the 'alarms/' template files
        dest_path     -- directory where .alhConfig files are written
        app_id        -- optional application id (kept for callers)
        verbose       -- enable progress output
        """
        self.template_path = template_path
        self.dest_path = dest_path
        self.verbose = verbose
        self.app_id = app_id
        self.alarm_info = {}
        # Machine areas; presumably beamline order, 'global2' first.
        self.areas = ['global2', 'GUNB', 'L0B', 'HTR', 'L1B', 'BC1B', 'L2B',
                      'BC2B', 'L3B', 'EXT', 'DOG', 'BYP', 'SLTH', 'SLTS',
                      'BSYH', 'BSYS', 'LTUH', 'LTUS', 'UNDH', 'UNDS',
                      'DMPH', 'DMPS', 'FEEH', 'FEES']
        # Intentionally an alias of self.areas, not a copy.
        self.areas_in_order = self.areas
        # Open a session to the MPS database
        with MpsDbReader(db_file) as mps_db_session:
            # Extract the application information
            self.mps_name = MpsName(mps_db_session)
            self.__extract_alarms(mps_db_session)

    def __extract_alarms(self, mps_db_session):
        """
        Extract all alarm information from the MPS database. A session to
        the database is passed as an argument. Builds self.alarm_info:

        alarm_info = {
          'area name': {
            'faults': ['fault_PV', ...],
            'apps': ['app_PV', ...],
            'link_nodes': { 'mp01': ['ln_PV', ...], ... }  # app areas only
          },
          ...
        }
        """
        try:
            app_cards = mps_db_session.query(models.ApplicationCard).all()
            faults = mps_db_session.query(models.Fault).all()
            destinations = mps_db_session.query(models.BeamDestination).all()
        except exc.SQLAlchemyError as e:
            raise
        # Check if there were applications/faults defined in the database
        if len(app_cards) == 0:
            return
        if len(faults) == 0:
            return
        # Alarms for app status (i.e. alarm when an APP is not updating MPS)
        for app_card in app_cards:
            area = self.check_area(app_card.area.upper())
            if area in self.alarm_info:
                alarm_area = self.alarm_info[area]
            else:
                alarm_area = {'faults': [], 'apps': [], 'link_nodes': {}}
                self.alarm_info[area] = alarm_area
            # The $(BASE) macro defines in which central node this PV lives
            # Currently it is set to SIOC:SYS2:MP01 in this script
            alarm_area['apps'].append('$(BASE):APP{}_STATUS'.format(
                app_card.global_id))
            link_node = format(app_card.link_node.location).upper()
            if link_node in alarm_area['link_nodes']:
                alarm_area['link_nodes'][link_node] = self.add_ln_alarms(
                    app_card, alarm_area['link_nodes'][link_node])
            else:
                alarm_area['link_nodes'][link_node] = self.add_ln_alarms(
                    app_card, None)
        # Alarms for faults for all other areas
        for fault in faults:
            pv = self.mps_name.getFaultName(fault)
            if (pv == None):
                pv = 'BASE:AREA:POSITION:FAULT'
            area = self.check_area(pv.split(":")[1].upper())
            if area in self.alarm_info:
                alarm_area = self.alarm_info[area]
            else:
                alarm_area = {'faults': [], 'apps': []}
                self.alarm_info[area] = alarm_area
            alarm_area['faults'].append(pv)
        # Alarms for beam destinations (machine-wide, kept under 'global2')
        area = 'global2'
        alarm_area = {'faults': [], 'apps': []}
        for destination in destinations:
            pv = self.mps_name.getBeamDestinationName(destination)
            alarm_area['faults'].append(pv)
        self.alarm_info[area] = alarm_area
        # Save a list of areas in a separate array ('NONE' is the bucket
        # for areas that check_area could not map).
        self.areas = []
        for area in self.alarm_info:
            if (area != 'NONE'):
                self.areas.append(area)
        self.areas = self.sort_areas()

    def sort_areas(self):
        """Return self.areas reordered to match self.areas_in_order."""
        sorted_areas = []
        for a in self.areas_in_order:
            if a in self.areas:
                sorted_areas.append(a)
        return sorted_areas

    def add_ln_alarms(self, app_card, ln_pvs):
        """Append the link-node alarm PVs for app_card to ln_pvs.

        ln_pvs may be None, in which case a new list is created.
        """
        if not ln_pvs:
            ln_pvs = []
        ln_pvs.append('{}:{}:MPS_EN'.format(
            app_card.link_node.get_pv_base(), app_card.get_card_id()))
        ln_pvs.append('{}:{}:TIM_LINK_STAT'.format(
            app_card.link_node.get_pv_base(), app_card.get_card_id()))
        # ln_pvs.append('{}:{}:THR_LOADED'.format(app_card.link_node.get_pv_base(), app_card.get_card_id()))
        return ln_pvs

    def check_area(self, area):
        """Map an (uppercased) raw area name to a known alarm area.

        Unknown areas map to 'NONE'.
        """
        if not area in self.areas:
            if (area.startswith('BPN')):
                return 'BYP'
            elif (area == 'SPH'):
                return 'BSYH'
            elif (area == 'SPS'):
                return 'BSYS'
            elif (area == 'SPD'):
                return 'BSYH'
            elif (area == 'CLTS'):
                return 'BSYS'
            else:
                return 'NONE'
        else:
            return area

    def generate_alarm_files(self, template_body_name, template_header_name,
                             template_include_name):
        """
        Generate the EPICS database and configuration files from the
        application data obtained by the extract_alarms method.

        The files will be written in the destination directory specified
        from the user (TOP), following this structure:

        <TOP>/alarms

        Files:
          mps.alhConfig
          |- mps_global2.alhConfig
          | ...
          |- mps_<AREA>.alhConfig
          |- mps_<AREA>_flt.alhConfig (faults)
          |- mps_<AREA>_apps.alhConfig (app timeouts)
          |- mps_<AREA>_mpXXX.alhConfig (link node XXX alarms)
        """
        if (self.verbose):
            print("==================================================")
            print("== Generating alarm handler files:              ==")
            print("==================================================")
        for area in self.areas_in_order:
            if not area in self.alarm_info:
                continue
            alarm_info = self.alarm_info[area]
            include_files = []
            # Generate mps_<AREA>_faults.alhConfig file
            if len(alarm_info['faults']) > 0:
                alh_filename = '{}_faults'.format(area.lower())
                include_files.append('faults')
                alh_filename = 'mps_' + alh_filename + '.alhConfig'
                group_name = 'MPS:{}:FLT'.format(area.upper())
                group_alias = 'Faults'
                group_pv = group_name
                self.generate_alh_file(
                    template_body_name, template_header_name, group_name,
                    group_alias, group_pv, area, alarm_info['faults'],
                    '{}{}'.format(self.dest_path, alh_filename))
            # Generate mps_<AREA>_apps.alhConfig file
            if len(alarm_info['apps']) > 0:
                alh_filename = '{}_apps'.format(area.lower())
                include_files.append('apps')
                alh_filename = 'mps_' + alh_filename + '.alhConfig'
                group_name = 'MPS:{}:APPS'.format(area.upper())
                group_alias = 'Applications'
                group_pv = group_name
                self.generate_alh_file(
                    template_body_name, template_header_name, group_name,
                    group_alias, group_pv, area, alarm_info['apps'],
                    '{}{}'.format(self.dest_path, alh_filename))
            # Generate mps_<AREA>_mp<XX>.alhConfig files
            if 'link_nodes' in alarm_info:
                for link_node, link_node_pvs in alarm_info['link_nodes'].iteritems():
                    alh_filename = '{}_{}'.format(area.lower(), link_node.lower())
                    include_files.append(link_node.lower())
                    alh_filename = 'mps_' + alh_filename + '.alhConfig'
                    group_name = 'MPS:{}:{}'.format(area.upper(), link_node.upper())
                    group_alias = 'Link Node {}'.format(link_node.upper())
                    group_pv = group_name
                    self.generate_alh_file(
                        template_body_name, template_header_name, group_name,
                        group_alias, group_pv, area, link_node_pvs,
                        '{}{}'.format(self.dest_path, alh_filename))
            # Per-area include file that pulls in the files generated above.
            alh_include_filename = '{}mps_{}.alhConfig'.format(
                self.dest_path, area.lower())
            group_name = 'MPS:{}'.format(area.upper())
            group_alias = area.upper()
            group_pv = group_name
            self.generate_alh_include_file(
                template_header_name, template_include_name, group_name,
                group_alias, group_pv, area, include_files,
                alh_include_filename)
        self.generate_mps_alh_file(template_header_name, template_include_name)

    def generate_alh_include_file(self, template_header_name,
                                  template_include_name, group_name,
                                  group_alias, group_pv, area, include_files,
                                  alh_include_filename):
        """
        Generate alarm file with a list of include files
        """
        system_macro = 'mps'
        substitution_header_file = '{}mps_{}_header.substitution'.format(
            self.dest_path, area.lower())
        self.generate_substitution_header_file(
            substitution_header_file, template_header_name, group_name,
            group_alias, group_pv, area, system_macro)
        generated_template_header = '{}mps_{}.template.header'.format(
            self.dest_path, area.lower())
        self.run_msi(substitution_header_file, generated_template_header)
        substitution_include_file = '{}mps_{}_include.substitution'.format(
            self.dest_path, area.lower())
        self.generate_substitution_include_file(
            substitution_include_file, template_include_name, 'mps', area,
            group_name, include_files)
        generated_template_body = '{}mps_{}.template.body'.format(
            self.dest_path, area.lower())
        self.run_msi(substitution_include_file, generated_template_body)
        self.concatenate_files(alh_include_filename,
                               generated_template_header,
                               generated_template_body)
        return

    def generate_mps_alh_file(self, template_header_name,
                              template_include_name):
        """Generate the top-level mps.alhConfig including every area."""
        area = 'mps'
        system_macro = 'all2'
        substitution_header_file = '{}mps_{}_header.substitution'.format(
            self.dest_path, area.lower())
        self.generate_substitution_header_file(
            substitution_header_file, template_header_name, 'ALL2:MPS:1',
            'MPS', 'ALL2:MPS:1', area, system_macro)
        generated_template_header = '{}mps_{}.template.header'.format(
            self.dest_path, area.lower())
        self.run_msi(substitution_header_file, generated_template_header)
        substitution_include_file = '{}mps_{}_include.substitution'.format(
            self.dest_path, area.lower())
        self.generate_substitution_include_file(
            substitution_include_file, template_include_name, 'all2', 'mps',
            'ALL2:MPS:1', self.areas)
        generated_template_body = '{}mps_{}.template.body'.format(
            self.dest_path, area.lower())
        self.run_msi(substitution_include_file, generated_template_body)
        alh_file = '{}mps.alhConfig'.format(self.dest_path)
        self.concatenate_files(alh_file, generated_template_header,
                               generated_template_body)
        return

    def generate_substitution_include_file(self, file_name,
                                           template_body_name, system,
                                           subsystem, group_name, areas):
        """Write the msi substitution file listing one row per area."""
        with open(file_name, 'w') as f:
            f.write('file "{}alarms/{}" {{ pattern\n'.format(
                self.template_path, template_body_name))
            f.write('{SYSTEM, SUBSYSTEM, SUBSYSTEM_LOWER, GROUP_NAME, AREA_LOWER}\n')
            for area in areas:
                if system == 'mps' or system == 'MPS':
                    f.write('{{ "{}", "{}", "{}", "{}", "{}" }}\n'.format(
                        system.upper(), subsystem.upper(),
                        'mps_{}'.format(subsystem.lower()), group_name,
                        area.lower()))
                else:
                    f.write('{{ "{}", "{}", "{}", "{}", "{}" }}\n'.format(
                        system.upper(), subsystem.upper(), subsystem.lower(),
                        group_name, area.lower()))
            f.write('}\n')

    def generate_substitution_header_file(self, file_name,
                                          template_header_name, group_name,
                                          group_alias, group_pv, area,
                                          system_macro):
        """Write the msi substitution file for an alh header."""
        # Generate substitutions file
        with open(file_name, 'w') as f:
            f.write('file "{}alarms/{}" {{ pattern\n'.format(
                self.template_path, template_header_name))
            f.write('{GROUP_NAME, GROUP_ALIAS, GROUP_PV, AREA, SYSTEM, BASE}\n')
            f.write('{{ "{}", "{}", "{}", "{}", "{}", "SIOC:SYS2:MP01" }}\n'.format(
                group_name, group_alias, group_pv, area.upper(),
                system_macro.upper()))
            f.write('}\n')

    def generate_substitution_body_file(self, file_name, template_body_name,
                                        group_name, area, system_macro, pvs):
        """Write the msi substitution file with one row per alarm PV."""
        with open(file_name, 'w') as f:
            f.write('file "{}alarms/{}" {{ pattern\n'.format(
                self.template_path, template_body_name))
            f.write('{AREA, GROUP_NAME, SYSTEM, BASE, FAULT_PV}\n')
            for pv in pvs:
                # msi separates row values by whitespace or commas, so the
                # space between BASE and FAULT_PV is a valid separator.
                f.write('{{ "{}", "{}", MPS, SIOC:SYS2:MP01 {} }}\n'.format(
                    area.upper(), group_name, pv.upper()))
            f.write('}\n')

    def run_msi(self, substitution_file, template_file):
        """Expand a substitution file with msi, then delete it."""
        # NOTE: os.system with internally-built paths; inputs are not
        # user-controlled beyond dest_path.
        msi_cmd = 'msi -V -S {} -o {}'.format(substitution_file,
                                              template_file)
        os.system(msi_cmd)
        os.system('rm -f {}'.format(substitution_file))

    def concatenate_files(self, output, header, body):
        """Produce output = header + body, removing the inputs."""
        os.system('mv -f {} {}'.format(header, output))
        os.system('cat {} >> {}'.format(body, output))
        os.system('rm -f {}'.format(body))

    def generate_alh_file(self, template_body_name, template_header_name,
                          group_name, group_alias, group_pv, area, pvs,
                          alh_filename):
        """Generate one .alhConfig file (header + one row per PV)."""
        # Generate substitutions file
        substitution_header_file = '{}mps_{}_header.substitution'.format(
            self.dest_path, area.lower())
        self.generate_substitution_header_file(
            substitution_header_file, template_header_name, group_name,
            group_alias, group_pv, area, 'MPS')
        substitution_body_file = '{}mps_{}_body.substitution'.format(
            self.dest_path, area.lower())
        self.generate_substitution_body_file(
            substitution_body_file, template_body_name, group_name, area,
            'MPS', pvs)
        generated_template_header = '{}mps_{}.template.header'.format(
            self.dest_path, area.lower())
        self.run_msi(substitution_header_file, generated_template_header)
        generated_template_body = '{}mps_{}.template.body'.format(
            self.dest_path, area.lower())
        self.run_msi(substitution_body_file, generated_template_body)
        self.concatenate_files(alh_filename, generated_template_header,
                               generated_template_body)

    def print_alarm_data(self):
        """
        Print the content of the application data obtained by the
        extract_alarms method.
        """
        print('+- Area --+- # Fault PVs -+- # App PVs -+- # LN PVs -+')
        for key, value in self.alarm_info.iteritems():
            ln_pvs = 0
            if 'link_nodes' in value:
                for ln, pvs in value['link_nodes'].iteritems():
                    ln_pvs += len(pvs)
            # BUG FIX: the last column repeated index {2} (apps count,
            # with a malformed ' > 3' spec) instead of printing ln_pvs.
            sys.stdout.write('| {0: >7} | {1: >3} | {2: >3} | {3: >3} |'.
                             format(key, len(value['faults']),
                                    len(value['apps']), ln_pvs))
            print('')
        print('+---------+---------------+-------------+------------+')
def __init__(self, db_file_name, rt_file_name):
    """Open the MPS configuration and runtime databases and keep handles
    to both sessions plus a PV-name helper built on the config session."""
    config = MPSConfig(db_file_name, rt_file_name)
    self.mps = config
    self.session = config.session
    self.rt_session = config.runtime_session
    self.mps_names = MpsName(self.session)
def exportDeviceInputs(file, deviceInputs, session):
    """Write the EPICS database records for every digital device input.

    For each input this emits: the current value (bi <name>_MPSC), the
    latched value (bi <name>_MPS), an unlatch command (bo <name>_UNLH),
    and the bypass records (<name>_BYPV, _BYPS, _BYPD, _BYPD_STR).  The
    output file is closed before returning.
    """
    mpsName = MpsName(session)
    for deviceInput in deviceInputs:
        name = mpsName.getDeviceInputName(deviceInput)
        channel = deviceInput.channel
        location = 'CR[{0}], CA[{1}], CH[{2}]'.format(
            channel.card.crate.number, channel.card.number, channel.number)

        # Alarm on the zero state when alarm_state == 0, otherwise on one.
        if channel.alarm_state == 0:
            severities = [('ZSV', 'MAJOR'), ('OSV', 'NO_ALARM')]
        else:
            severities = [('ZSV', 'NO_ALARM'), ('OSV', 'MAJOR')]

        # Fields shared by the current-value and latched-value records.
        common = [('DTYP', 'asynUInt32Digital'),
                  ('SCAN', '1 second'),
                  ('ZNAM', '{0}'.format(channel.z_name)),
                  ('ONAM', '{0}'.format(channel.o_name))] + severities

        # Current (unlatched) value
        fields = [('DESC', location)] + common + \
            [('INP', '@asynMask(CENTRAL_NODE {0} 1 0)DEVICE_INPUT'.format(
                deviceInput.id))]
        printRecord(file, 'bi', '{0}_MPSC'.format(name), fields)

        #=== Begin Latch records ====
        # Latched value
        fields = [('DESC', location + ' Latched Value')] + common + \
            [('INP',
              '@asynMask(CENTRAL_NODE {0} 1 0)DEVICE_INPUT_LATCHED'.format(
                  deviceInput.id))]
        printRecord(file, 'bi', '{0}_MPS'.format(name), fields)

        # Command record that clears the latched value
        fields = [('DESC', location + ' Unlatch'),
                  ('DTYP', 'asynUInt32Digital'),
                  ('OUT',
                   '@asynMask(CENTRAL_NODE {0} 1 0)DEVICE_INPUT_UNLATCH'.format(
                       deviceInput.id))]
        printRecord(file, 'bo', '{0}_UNLH'.format(name), fields)
        #=== End Latch records ====

        #=== Begin Bypass records ====
        # Bypass Value: used while bypass is active
        fields = [('DESC', 'Bypass Value'),
                  ('DTYP', 'asynUInt32Digital'),
                  ('ZNAM', '{0}'.format(channel.z_name)),
                  ('ONAM', '{0}'.format(channel.o_name))] + severities + \
            [('OUT', '@asynMask(CENTRAL_NODE {0} 1 0)DEVICE_INPUT_BYPV'.format(
                deviceInput.id)),
             ('VAL', '0'),
             ('PINI', 'YES')]
        printRecord(file, 'bo', '{0}_BYPV'.format(name), fields)

        # Bypass Status: shows if bypass is currently active or not
        fields = [('DESC', 'Bypass Status'),
                  ('SCAN', '1 second'),
                  ('DTYP', 'asynUInt32Digital'),
                  ('ZNAM', 'Not Bypassed'),
                  ('ONAM', 'Bypassed'),
                  ('ZSV', 'NO_ALARM'),
                  ('OSV', 'MAJOR'),
                  ('INP',
                   '@asynMask(CENTRAL_NODE {0} 1 0)DEVICE_INPUT_BYPS'.format(
                       deviceInput.id))]
        printRecord(file, 'bi', '{0}_BYPS'.format(name), fields)

        # Bypass expiration, seconds since the Unix epoch
        fields = [('DESC', 'Bypass Expiration Date/Time'),
                  ('DTYP', 'asynInt32'),
                  ('EGU', 'Seconds'),
                  ('VAL', '0'),
                  ('PINI', 'YES'),
                  ('OUT',
                   '@asyn(CENTRAL_NODE {0} 0)DEVICE_INPUT_BYPEXPDATE'.format(
                       deviceInput.id))]
        printRecord(file, 'longout', '{0}_BYPD'.format(name), fields)

        # Human-readable expiration date/time
        fields = [('DESC', 'Bypass Expiration Date/Time String'),
                  ('DTYP', 'asynOctetRead'),
                  ('SCAN', '1 second'),
                  ('VAL', 'Invalid'),
                  ('PINI', 'YES'),
                  ('INP',
                   '@asyn(CENTRAL_NODE {0} 0)DEVICE_INPUT_BYPEXPDATE_STRING'.format(
                       deviceInput.id))]
        printRecord(file, 'stringin', '{0}_BYPD_STR'.format(name), fields)
        #=== End Bypass records ====
    file.close()
def exportAnalogDevices(file, analogDevices, session):
    """Write the EPICS database records for every analog device threshold.

    For each fault state (threshold) of each analog device this emits: the
    current comparison result (bi <name>:<state>_MPSC), the latched value
    (bi _MPS), an unlatch command (bo _UNLH), and the bypass records
    (_BYPV, _BYPS, _BYPD, _BYPD_STR).  The output file is closed before
    returning.
    """
    mpsName = MpsName(session)
    for analogDevice in analogDevices:
        name = mpsName.getAnalogDeviceName(analogDevice)
        # Walk fault inputs -> faults -> fault states to enumerate every
        # threshold defined for this analog device.
        faultInputs = session.query(models.FaultInput).filter(
            models.FaultInput.device_id == analogDevice.id).all()
        for fi in faultInputs:
            faults = session.query(
                models.Fault).filter(models.Fault.id == fi.fault_id).all()
            for fa in faults:
                faultStates = session.query(models.FaultState).filter(
                    models.FaultState.fault_id == fa.id).all()
                for state in faultStates:
                    # Validate the threshold mask: at least one of the low
                    # 32 bits must be set.  bitIndex ends up at the lowest
                    # set bit (only used for validation here).
                    # BUG FIX: the original scan kept shifting after bit 31
                    # and crashed with "negative shift count" on an all-zero
                    # mask instead of reaching its error branch; the scan is
                    # now bounded and reports the bad mask cleanly.
                    bitIndex = 0
                    while bitIndex < 32 and \
                            (state.device_state.mask >> bitIndex) & 1 == 0:
                        bitIndex = bitIndex + 1
                    if bitIndex == 32:
                        print("ERROR: invalid threshold mask (" +
                              hex(state.device_state.mask) + ")")
                        exit(-1)

                    # Current threshold comparison
                    fields = []
                    fields.append(
                        ('DESC', 'Crate[{0}], Card[{1}], Channel[{2}]'.format(
                            analogDevice.channel.card.crate.number,
                            analogDevice.channel.card.number,
                            analogDevice.channel.number)))
                    fields.append(('DTYP', 'asynUInt32Digital'))
                    fields.append(('SCAN', '1 second'))
                    fields.append(('ZNAM', 'IS_OK'))
                    fields.append(('ONAM', 'IS_EXCEEDED'))
                    fields.append(('ZSV', 'NO_ALARM'))
                    fields.append(('OSV', 'MAJOR'))
                    fields.append(
                        ('INP',
                         '@asynMask(CENTRAL_NODE {0} {1} 0)ANALOG_DEVICE'.
                         format(analogDevice.id, state.device_state.mask)))
                    printRecord(
                        file, 'bi',
                        '{0}:{1}_MPSC'.format(name, state.device_state.name),
                        fields)

                    #=== Begin Latch records ====
                    # Record for latched value
                    fields = []
                    fields.append(
                        ('DESC', 'CR[{0}], CA[{1}], CH[{2}] Latched Value'.format(
                            analogDevice.channel.card.crate.number,
                            analogDevice.channel.card.number,
                            analogDevice.channel.number)))
                    fields.append(('DTYP', 'asynUInt32Digital'))
                    fields.append(('HIHI', '1'))  # Alarm if value is non-zero
                    fields.append(('SCAN', '1 second'))
                    fields.append(('ZNAM', 'IS_OK'))
                    fields.append(('ONAM', 'IS_EXCEEDED'))
                    fields.append(
                        ('INP',
                         '@asynMask(CENTRAL_NODE {0} {1} 0)ANALOG_DEVICE_LATCHED'
                         .format(analogDevice.id, state.device_state.mask)))
                    printRecord(
                        file, 'bi',
                        '{0}:{1}_MPS'.format(name, state.device_state.name),
                        fields)

                    # Record to process unlatch value
                    fields = []
                    fields.append(
                        ('DESC', 'CR[{0}], CA[{1}], CH[{2}] Unlatch'.format(
                            analogDevice.channel.card.crate.number,
                            analogDevice.channel.card.number,
                            analogDevice.channel.number)))
                    fields.append(('DTYP', 'asynUInt32Digital'))
                    fields.append(
                        ('OUT',
                         '@asynMask(CENTRAL_NODE {0} 1 0)ANALOG_DEVICE_UNLATCH'
                         .format(analogDevice.id)))
                    printRecord(
                        file, 'bo',
                        '{0}:{1}_UNLH'.format(name, state.device_state.name),
                        fields)
                    #=== End Latch records ====

                    #=== Begin Bypass records ====
                    # Bypass Value: used while bypass is active
                    fields = []
                    fields.append(
                        ('DESC', 'Threshold bypass value for {0}'.format(
                            analogDevice.channel.name)))
                    fields.append(('DTYP', 'asynUInt32Digital'))
                    fields.append(('VAL', '0'))
                    fields.append(('PINI', 'YES'))
                    fields.append(('ZNAM', 'IS_OK'))
                    fields.append(('ONAM', 'IS_EXCEEDED'))
                    fields.append(('ZSV', 'NO_ALARM'))
                    fields.append(('OSV', 'MAJOR'))
                    fields.append(('HIHI', '1'))  # Alarm if value is non-zero
                    fields.append(
                        ('OUT',
                         '@asynMask(CENTRAL_NODE {0} 0 {1})ANALOG_DEVICE_BYPV'.
                         format(analogDevice.id, state.device_state.mask)))
                    printRecord(
                        file, 'bo',
                        '{0}:{1}_BYPV'.format(name, state.device_state.name),
                        fields)

                    # Bypass Status: shows if bypass is currently active or not
                    fields = []
                    fields.append(('DESC', 'Bypass Status'))
                    fields.append(('SCAN', '1 second'))
                    fields.append(('DTYP', 'asynUInt32Digital'))
                    fields.append(('ZNAM', 'Not Bypassed'))
                    fields.append(('ONAM', 'Bypassed'))
                    fields.append(('ZSV', 'NO_ALARM'))
                    fields.append(('OSV', 'MAJOR'))
                    fields.append(
                        ('INP',
                         '@asynMask(CENTRAL_NODE {0} 1 0)ANALOG_DEVICE_BYPS'.
                         format(analogDevice.id)))
                    printRecord(
                        file, 'bi',
                        '{0}:{1}_BYPS'.format(name, state.device_state.name),
                        fields)

                    # Bypass expiration, seconds since the Unix epoch
                    fields = []
                    fields.append(('DESC', 'Bypass Expiration Date/Time'))
                    fields.append(('DTYP', 'asynInt32'))
                    fields.append(('VAL', '0'))
                    fields.append(('PINI', 'YES'))
                    fields.append(
                        ('OUT',
                         '@asyn(CENTRAL_NODE {0} 0)ANALOG_DEVICE_BYPEXPDATE'.
                         format(analogDevice.id)))
                    printRecord(
                        file, 'longout',
                        '{0}:{1}_BYPD'.format(name, state.device_state.name),
                        fields)

                    # Human-readable expiration date/time
                    fields = []
                    fields.append(
                        ('DESC', 'Bypass Expiration Date/Time String'))
                    fields.append(('DTYP', 'asynOctetRead'))
                    fields.append(('SCAN', '1 second'))
                    fields.append(('VAL', 'Invalid'))
                    fields.append(('PINI', 'YES'))
                    fields.append((
                        'INP',
                        '@asyn(CENTRAL_NODE {0} 0)ANALOG_DEVICE_BYPEXPDATE_STRING'
                        .format(analogDevice.id)))
                    printRecord(
                        file, 'stringin',
                        '{0}:{1}_BYPD_STR'.format(name,
                                                  state.device_state.name),
                        fields)
                    #=== End Bypass records ====
    file.close()