def replace_endpoints(self, d):
    """Replace every child endpoint of this object from a serialized dict.

    Args:
        d (dict): Serialized {name: serialized child} mapping. A "typeid"
            entry, if present, must match this class's typeid.

    Raises:
        AttributeError: If a key in d would shadow an existing attribute.
    """
    new_children = OrderedDict()
    for key, value in d.items():
        assert isinstance(key, str_), "Expected string, got %s" % (key,)
        if key == "typeid":
            # The serialized typeid must agree with this class's own
            assert value == self.typeid, \
                "Dict has typeid %s but Class has %s" % (value, self.typeid)
            continue
        try:
            object.__getattribute__(self, key)
        except AttributeError:
            # Name is free, so it is safe to use as a child endpoint
            new_children[key] = deserialize_object(
                value, self.child_type_check)
        else:
            raise AttributeError(
                "Setting child %r would shadow an attribute" % (key,))
    self.endpoints = list(new_children)
    for key, child in new_children.items():
        # Set quietly; one combined change report is issued below
        self.set_endpoint_data(key, child, notify=False)
    if self.process:
        self.process.report_changes(
            [self.process_path, serialize_object(self)])
def replace_endpoints(self, d):
    """Replace every child endpoint of this object from a serialized dict.

    Args:
        d (dict): Serialized {name: serialized child} mapping. A "typeid"
            entry, if present, must match this class's typeid.

    Raises:
        AttributeError: If a key in d would shadow an existing attribute.
    """
    children = OrderedDict()
    for k, v in d.items():
        assert isinstance(k, str_), "Expected string, got %s" % (k,)
        if k == "typeid":
            # The serialized typeid must agree with this class's own
            assert v == self.typeid, \
                "Dict has typeid %s but Class has %s" % (v, self.typeid)
        else:
            try:
                object.__getattribute__(self, k)
            except AttributeError:
                # Name is free, so it can become a child endpoint
                children[k] = deserialize_object(v, self.child_type_check)
            else:
                raise AttributeError(
                    "Setting child %r would shadow an attribute" % (k,))
    self.endpoints = list(children)
    for k, v in children.items():
        # Set quietly; one combined change report is issued below
        self.set_endpoint_data(k, v, notify=False)
    if self.process:
        # Report the whole serialized structure as a single change
        self.process.report_changes(
            [self.process_path, serialize_object(self)])
def update_configure_model(
        configure_model: MethodMeta,
        part_configure_infos: List[ConfigureParamsInfo]) -> None:
    """Merge each part's configure parameter info into the configure method.

    Recomputes the required list, the element metas and the defaults of
    ``configure_model.takes``/``returns`` from the model's existing required
    arguments plus every ConfigureParamsInfo contributed by the parts.
    """
    # These will not be inserted as they already exist
    ignored = list(ConfigureHook.call_types)
    # Re-calculate the following
    required = []
    metas = OrderedDict()
    defaults = OrderedDict()
    # First do the required arguments
    for k in configure_model.takes.required:
        required.append(k)
        metas[k] = configure_model.takes.elements[k]
    for info in part_configure_infos:
        for k in info.required:
            if k not in required + ignored:
                required.append(k)
                # TODO: moan about type changes, when != works...
                metas[k] = info.metas[k]
    # Now the default and optional
    for k in configure_model.takes.elements:
        if k not in required:
            metas[k] = configure_model.takes.elements[k]
    for info in part_configure_infos:
        for k, meta in info.metas.items():
            if k not in required + ignored:
                # TODO: moan about type changes, when != works...
                metas[k] = meta
                if k in info.defaults:
                    if isinstance(meta, TableMeta) and not min(
                            m.writeable for m in meta.elements.values()):
                        # This is a table with non-writeable rows, merge the
                        # defaults together row by row
                        rows = []
                        if k in defaults:
                            rows += defaults[k].rows()
                        rows += info.defaults[k].rows()
                        assert meta.table_cls, "No Meta table class"
                        defaults[k] = meta.table_cls.from_rows(rows)
                    else:
                        # Later parts override earlier defaults for this key
                        defaults[k] = info.defaults[k]
    # Copy and prepare values for takes and returns
    # (returns gets read-only copies of the same metas)
    takes_metas = OrderedDict()
    returns_metas = OrderedDict()
    for k, v in metas.items():
        takes_metas[k] = deserialize_object(v.to_dict(), VMeta)
        returns_metas[k] = deserialize_object(v.to_dict(), VMeta)
        returns_metas[k].set_writeable(False)
    # Set them on the model
    configure_model.takes.set_elements(takes_metas)
    configure_model.takes.set_required(required)
    configure_model.returns.set_elements(returns_metas)
    configure_model.returns.set_required(required)
    configure_model.set_defaults(defaults)
def set_parameters(self, parameters):
    """Set the parameters that will be Posted to the endpoint.

    Args:
        parameters: Value to post to path, or None for no parameters
    """
    if parameters is None:
        self.parameters = None
        return
    normalized = OrderedDict()
    for key, value in parameters.items():
        normalized[deserialize_object(key, str_)] = serialize_object(value)
    self.parameters = normalized
def set_elements(self, elements):
    """Set the elements dict from a serialized dict

    Args:
        elements (dict): Serialized {name: VMeta} mapping; a "typeid"
            entry is skipped.

    Returns:
        The value returned by set_endpoint_data for "elements".
    """
    deserialized = OrderedDict()
    for k, v in elements.items():
        if k != "typeid":
            k = deserialize_object(k, str_)
            v = deserialize_object(v, VMeta)
            if not v.label:
                # Give unlabelled elements a human readable default label
                v.set_label(camel_to_title(k))
            deserialized[k] = v
    if hasattr(self, "elements"):
        # Stop old elements notifying
        for k, v in self.elements.items():
            v.set_notifier_path(None, ())
    for k, v in deserialized.items():
        # Wire new elements into our notifier tree
        v.set_notifier_path(self.notifier, self.path + ["elements", k])
    return self.set_endpoint_data("elements", deserialized)
def handle_changes(self, changes):
    """Fold new field changes into self.changes and dispatch per block.

    Args:
        changes: Iterable of ("BLOCK.FIELD", value) pairs.

    Changes belonging to blocks with no known controller are logged and
    dropped; the rest are applied under the controller's changes_squashed
    context via do_field_changes.
    """
    for key, value in changes:
        self.changes[key] = value
    # Group every pending change by the block it belongs to
    grouped = OrderedDict()
    for full_field, val in list(self.changes.items()):
        block_name, field_name = full_field.split(".", 1)
        grouped.setdefault(block_name, []).append(
            (field_name, full_field, val))
    for block_name, field_changes in grouped.items():
        # Squash all of this block's changes into one notification
        block_mri = "%s:%s" % (self.mri, block_name)
        try:
            controller = self.process.get_controller(block_mri)
        except ValueError:
            # No controller for this block: discard its pending changes
            self.log.debug("Block %s not known", block_name)
            for _, full_field, _ in field_changes:
                self.changes.pop(full_field)
        else:
            with controller.changes_squashed:
                self.do_field_changes(block_name, field_changes)
class PandABoxTablePart(PandABoxFieldPart):
    """This will normally be instantiated by the PandABox assembly, not
    created in yaml"""

    def __init__(self, process, control, meta, block_name, field_name,
                 writeable):
        """Build the column metas for this table field from the hardware's
        reported bit ranges and fill them into meta."""
        super(PandABoxTablePart, self).__init__(
            process, control, meta, block_name, field_name, writeable)
        # Fill in the meta object with the correct headers
        columns = OrderedDict()
        # {column_name: (bits_hi, bits_lo)} for packing/unpacking rows
        self.fields = OrderedDict()
        fields = control.get_table_fields(block_name, field_name)
        for field_name, (bits_hi, bits_lo) in fields.items():
            nbits = bits_hi - bits_lo + 1
            if nbits < 1:
                raise ValueError("Bad bits %s:%s" % (bits_hi, bits_lo))
            if nbits == 1:
                # Single bit columns display as checkboxes
                column_meta = BooleanArrayMeta(field_name)
                widget_tag = widget("checkbox")
            else:
                # Pick the smallest unsigned dtype that holds nbits
                if nbits <= 8:
                    dtype = "uint8"
                elif nbits <= 16:
                    dtype = "uint16"
                elif nbits <= 32:
                    dtype = "uint32"
                elif nbits <= 64:
                    dtype = "uint64"
                else:
                    raise ValueError("Bad bits %s:%s" % (bits_hi, bits_lo))
                column_meta = NumberArrayMeta(dtype, field_name)
                widget_tag = widget("textinput")
            label, column_name = make_label_attr_name(field_name)
            column_meta.set_label(label)
            column_meta.set_tags([widget_tag])
            columns[column_name] = column_meta
            self.fields[column_name] = (bits_hi, bits_lo)
        meta.set_elements(TableElementMap(columns))

    def set_field(self, value):
        """Pack the table value into 32-bit ints and send to hardware."""
        int_values = self.list_from_table(value)
        self.control.set_table(self.block_name, self.field_name, int_values)

    def _calc_nconsume(self):
        # Number of 32-bit words each row occupies on the wire, derived
        # from the highest bit used by any column
        max_bits_hi = max(self.fields.values())[0]
        nconsume = int((max_bits_hi + 31) / 32)
        return nconsume

    def list_from_table(self, table):
        """Pack a table into the flat list of 32-bit ints the hardware takes.

        Each row's columns are OR-ed into one big int at their bit offsets,
        then split into nconsume little-endian 32-bit words.
        """
        int_values = []
        if self.fields:
            nconsume = self._calc_nconsume()
            for row in range(len(table[list(self.fields)[0]])):
                int_value = 0
                for name, (bits_hi, bits_lo) in self.fields.items():
                    max_value = 2 ** (bits_hi - bits_lo + 1)
                    field_value = int(table[name][row])
                    assert field_value < max_value, \
                        "Expected %s[%d] < %s, got %s" % (
                            name, row, max_value, field_value)
                    int_value |= field_value << bits_lo
                # Split the big int into 32-bit numbers
                for i in range(nconsume):
                    int_values.append(int_value & (2 ** 32 - 1))
                    int_value = int_value >> 32
        return int_values

    def table_from_list(self, int_values):
        """Inverse of list_from_table: rebuild a Table from 32-bit ints."""
        table = Table(self.meta)
        if self.fields:
            nconsume = self._calc_nconsume()
            for i in range(int(len(int_values) / nconsume)):
                # Reassemble the row's big int from its 32-bit words
                int_value = 0
                for c in range(nconsume):
                    int_value += int(int_values[i*nconsume+c]) << (32 * c)
                row = []
                for name, (bits_hi, bits_lo) in self.fields.items():
                    # Mask off everything above bits_hi then shift down
                    mask = 2 ** (bits_hi + 1) - 1
                    field_value = (int_value & mask) >> bits_lo
                    row.append(field_value)
                table.append(row)
        return table
class PandABlocksTablePart(PandABlocksFieldPart):
    """This will normally be instantiated by the PandABox assembly, not
    created in yaml"""

    def __init__(self, client, meta, block_name, field_name):
        # type: (AClient, AMeta, ABlockName, AFieldName) -> None
        # Fill in the meta object with the correct headers
        columns = OrderedDict()
        # {column_name: TableFieldData} for packing/unpacking rows
        self.field_data = OrderedDict()
        fields = client.get_table_fields(block_name, field_name)
        if not fields:
            # Didn't put any metadata in, make some up
            fields["VALUE"] = TableFieldData(31, 0, "The Value", None)
        for column_name, field_data in fields.items():
            nbits = field_data.bits_hi - field_data.bits_lo + 1
            if nbits < 1:
                raise ValueError("Bad bits in %s" % (field_data,))
            if field_data.labels:
                # Enumerated column: show the label strings in a combo
                column_meta = ChoiceArrayMeta(choices=field_data.labels)
                widget = Widget.COMBO
            elif nbits == 1:
                column_meta = BooleanArrayMeta()
                widget = Widget.CHECKBOX
            else:
                # Pick the smallest unsigned dtype that holds nbits
                if nbits <= 8:
                    dtype = "uint8"
                elif nbits <= 16:
                    dtype = "uint16"
                elif nbits <= 32:
                    dtype = "uint32"
                elif nbits <= 64:
                    dtype = "uint64"
                else:
                    raise ValueError("Bad bits in %s" % (field_data,))
                column_meta = NumberArrayMeta(dtype)
                widget = Widget.TEXTINPUT
            column_name = snake_to_camel(column_name)
            column_meta.set_label(camel_to_title(column_name))
            column_meta.set_tags([widget.tag()])
            column_meta.set_description(field_data.description)
            column_meta.set_writeable(True)
            columns[column_name] = column_meta
            self.field_data[column_name] = field_data
        meta.set_elements(columns)
        # Superclass will make the attribute for us
        super(PandABlocksTablePart, self).__init__(
            client, meta, block_name, field_name)

    def set_field(self, value):
        """Pack the table value into 32-bit ints and send to hardware."""
        int_values = self.list_from_table(value)
        self.client.set_table(self.block_name, self.field_name, int_values)

    def _calc_nconsume(self):
        # Number of 32-bit words each row occupies on the wire, derived
        # from the highest bit used by any column
        max_bits_hi = max(f.bits_hi for f in self.field_data.values())
        nconsume = int((max_bits_hi + 31) / 32)
        return nconsume

    def list_from_table(self, table):
        """Pack a table into the flat list of 32-bit ints the hardware takes.

        Label columns are converted to their index first; each row's columns
        are OR-ed into one big int, then split into nconsume 32-bit words.
        """
        int_values = []
        nconsume = self._calc_nconsume()
        for row in table.rows():
            int_value = 0
            for name, value in zip(table.call_types, row):
                field_data = self.field_data[name]
                max_value = 2**(field_data.bits_hi - field_data.bits_lo + 1)
                if field_data.labels:
                    # Choice column: store the index of the label
                    field_value = field_data.labels.index(value)
                else:
                    field_value = int(value)
                assert field_value < max_value, \
                    "Expected %s[%d] < %s, got %s" % (
                        name, row, max_value, field_value)
                int_value |= field_value << field_data.bits_lo
            # Split the big int into 32-bit numbers
            for i in range(nconsume):
                int_values.append(int_value & (2**32 - 1))
                int_value = int_value >> 32
        return int_values

    def table_from_list(self, int_values):
        """Inverse of list_from_table: rebuild a table from 32-bit ints."""
        rows = []
        nconsume = self._calc_nconsume()
        for i in range(int(len(int_values) / nconsume)):
            # Reassemble the row's big int from its 32-bit words
            int_value = 0
            for c in range(nconsume):
                int_value += int(int_values[i * nconsume + c]) << (32 * c)
            row = []
            for name, field_data in self.field_data.items():
                # Mask off everything above bits_hi then shift down
                mask = 2**(field_data.bits_hi + 1) - 1
                field_value = (int_value & mask) >> field_data.bits_lo
                if field_data.labels:
                    # This is a choice meta, so write the string value
                    row.append(field_data.labels[field_value])
                else:
                    row.append(field_value)
            rows.append(row)
        table = self.meta.table_cls.from_rows(rows)
        return table
class Process(Loggable):
    """Hosts a number of Blocks, distributing requests between them"""

    def __init__(self, name, sync_factory):
        self.set_logger_name(name)
        self.name = name
        self.sync_factory = sync_factory
        # Central request queue serviced by recv_loop
        self.q = self.create_queue()
        self._blocks = OrderedDict()  # block_name -> Block
        self._controllers = OrderedDict()  # block_name -> Controller
        self._block_state_cache = Cache()
        self._recv_spawned = None
        self._other_spawned = []
        # lookup of all Subscribe requests, ordered to guarantee subscription
        # notification ordering
        # {Request.generate_key(): Subscribe}
        self._subscriptions = OrderedDict()
        self.comms = []
        self._client_comms = OrderedDict()  # client comms -> list of blocks
        # Dispatch table: request type -> handler method
        self._handle_functions = {
            Post: self._forward_block_request,
            Put: self._forward_block_request,
            Get: self._handle_get,
            Subscribe: self._handle_subscribe,
            Unsubscribe: self._handle_unsubscribe,
            BlockChanges: self._handle_block_changes,
            BlockRespond: self._handle_block_respond,
            BlockAdd: self._handle_block_add,
            BlockList: self._handle_block_list,
            AddSpawned: self._add_spawned,
        }
        self.create_process_block()

    def recv_loop(self):
        """Service self.q, distributing the requests to the right block"""
        while True:
            request = self.q.get()
            self.log_debug("Received request %s", request)
            if request is PROCESS_STOP:
                # Got the sentinel, stop immediately
                break
            try:
                self._handle_functions[type(request)](request)
            except Exception as e:  # pylint:disable=broad-except
                self.log_exception("Exception while handling %s", request)
                try:
                    # Best effort error response; requester may be gone
                    request.respond_with_error(str(e))
                except Exception:
                    pass

    def add_comms(self, comms):
        """Register a comms object; only allowed before start()."""
        assert not self._recv_spawned, \
            "Can't add comms when process has been started"
        self.comms.append(comms)

    def start(self):
        """Start the process going"""
        self._recv_spawned = self.sync_factory.spawn(self.recv_loop)
        for comms in self.comms:
            comms.start()

    def stop(self, timeout=None):
        """Stop the process and wait for it to finish

        Args:
            timeout (float): Maximum amount of time to wait for each
                spawned process. None means forever
        """
        assert self._recv_spawned, "Process not started"
        self.q.put(PROCESS_STOP)
        for comms in self.comms:
            comms.stop()
        # Wait for recv_loop to complete first
        self._recv_spawned.wait(timeout=timeout)
        # Now wait for anything it spawned to complete
        for s, _ in self._other_spawned:
            s.wait(timeout=timeout)
        # Garbage collect the syncfactory
        del self.sync_factory

    def _forward_block_request(self, request):
        """Lookup target Block and spawn block.handle_request(request)

        Args:
            request (Request): The message that should be passed to the Block
        """
        block_name = request.endpoint[0]
        block = self._blocks[block_name]
        spawned = self.sync_factory.spawn(block.handle_request, request)
        self._add_spawned(AddSpawned(spawned, block.handle_request))

    def create_queue(self):
        """
        Create a queue using sync_factory object

        Returns:
            Queue: New queue
        """
        return self.sync_factory.create_queue()

    def create_lock(self):
        """
        Create a lock using sync_factory object

        Returns:
            New lock object
        """
        return self.sync_factory.create_lock()

    def spawn(self, function, *args, **kwargs):
        """Calls SyncFactory.spawn()"""
        def catching_function():
            # Wrapper so spawned exceptions are logged before re-raising
            try:
                function(*args, **kwargs)
            except Exception:
                self.log_exception(
                    "Exception calling %s(*%s, **%s)",
                    function, args, kwargs)
                raise
        spawned = self.sync_factory.spawn(catching_function)
        request = AddSpawned(spawned, function)
        self.q.put(request)
        return spawned

    def _add_spawned(self, request):
        """Record a spawned task, pruning completed ones."""
        spawned = self._other_spawned
        self._other_spawned = []
        spawned.append((request.spawned, request.function))
        # Filter out the spawned that have completed to stop memory leaks
        for sp, f in spawned:
            if not sp.ready():
                self._other_spawned.append((sp, f))

    def get_client_comms(self, block_name):
        """Return the client comms hosting block_name, or None."""
        for client_comms, blocks in list(self._client_comms.items()):
            if block_name in blocks:
                return client_comms

    def create_process_block(self):
        """Create the introspection block that lists hosted blocks."""
        self.process_block = Block()
        # TODO: add a meta here
        children = OrderedDict()
        children["blocks"] = StringArrayMeta(
            description="Blocks hosted by this Process"
        ).make_attribute([])
        children["remoteBlocks"] = StringArrayMeta(
            description="Blocks reachable via ClientComms"
        ).make_attribute([])
        self.process_block.replace_endpoints(children)
        self.process_block.set_process_path(self, [self.name])
        self.add_block(self.process_block, self)

    def update_block_list(self, client_comms, blocks):
        """Queue an update of the blocks a client comms can reach."""
        self.q.put(BlockList(client_comms=client_comms, blocks=blocks))

    def _handle_block_list(self, request):
        """Store the client comms block list and refresh remoteBlocks."""
        self._client_comms[request.client_comms] = request.blocks
        remotes = []
        for blocks in self._client_comms.values():
            # Deduplicate while preserving order
            remotes += [b for b in blocks if b not in remotes]
        self.process_block["remoteBlocks"].set_value(remotes)

    def _handle_block_changes(self, request):
        """Update subscribers with changes and applies stored changes to the
        cached structure"""
        # update cached dict
        subscription_changes = self._block_state_cache.apply_changes(
            *request.changes)
        # Send out the changes
        for subscription, changes in subscription_changes.items():
            if subscription.delta:
                # respond with the filtered changes
                subscription.respond_with_delta(changes)
            else:
                # respond with the structure of everything
                # below the endpoint
                d = self._block_state_cache.walk_path(subscription.endpoint)
                subscription.respond_with_update(d)

    def report_changes(self, *changes):
        """Queue structure changes for distribution to subscribers."""
        self.q.put(BlockChanges(changes=list(changes)))

    def block_respond(self, response, response_queue):
        """Queue a response to be pushed onto response_queue."""
        self.q.put(BlockRespond(response, response_queue))

    def _handle_block_respond(self, request):
        """Push the response to the required queue"""
        request.response_queue.put(request.response)

    def add_block(self, block, controller):
        """Add a block to be hosted by this process

        Args:
            block (Block): The block to be added
            controller (Controller): Its controller
        """
        path = block.process_path
        assert len(path) == 1, \
            "Expected block %r to have %r as parent, got path %r" % \
            (block, self, path)
        name = path[0]
        assert name not in self._blocks, \
            "There is already a block called %r" % name
        request = BlockAdd(block=block, controller=controller, name=name)
        if self._recv_spawned:
            # Started, so call in Process thread
            self.q.put(request)
        else:
            # Not started yet so we are safe to add in this thread
            self._handle_block_add(request)

    def _handle_block_add(self, request):
        """Add a block to be hosted by this process"""
        assert request.name not in self._blocks, \
            "There is already a block called %r" % request.name
        self._blocks[request.name] = request.block
        self._controllers[request.name] = request.controller
        serialized = request.block.to_dict()
        # Seed the cache and notify subscribers of the new block
        change_request = BlockChanges([[[request.name], serialized]])
        self._handle_block_changes(change_request)
        # Regenerate list of blocks
        self.process_block["blocks"].set_value(list(self._blocks))

    def get_block(self, block_name):
        """Return a hosted Block, making a client block for remote names."""
        try:
            return self._blocks[block_name]
        except KeyError:
            if block_name in self.process_block.remoteBlocks:
                return self.make_client_block(block_name)
            else:
                raise

    def make_client_block(self, block_name):
        """Create a ClientController-backed block for a remote block."""
        params = ClientController.MethodMeta.prepare_input_map(
            mri=block_name)
        controller = ClientController(self, {}, params)
        return controller.block

    def get_controller(self, block_name):
        """Return the Controller for a hosted block.

        Raises:
            KeyError: If block_name is not hosted here.
        """
        return self._controllers[block_name]

    def _handle_subscribe(self, request):
        """Add a new subscriber and respond with the current
        sub-structure state"""
        key = request.generate_key()
        assert key not in self._subscriptions, \
            "Subscription on %s already exists" % (key,)
        self._subscriptions[key] = request
        self._block_state_cache.add_subscriber(request, request.endpoint)
        d = self._block_state_cache.walk_path(request.endpoint)
        self.log_debug("Initial subscription value %s", d)
        if request.delta:
            # Initial delta is the whole structure at the root
            request.respond_with_delta([[[], d]])
        else:
            request.respond_with_update(d)

    def _handle_unsubscribe(self, request):
        """Remove a subscriber and respond with success or error"""
        key = request.generate_key()
        try:
            subscription = self._subscriptions.pop(key)
        except KeyError:
            request.respond_with_error(
                "No subscription found for %s" % (key,))
        else:
            self._block_state_cache.remove_subscriber(
                subscription, subscription.endpoint)
            request.respond_with_return()

    def _handle_get(self, request):
        """Respond with the cached structure at the requested endpoint."""
        d = self._block_state_cache.walk_path(request.endpoint)
        request.respond_with_return(d)
class PandABlocksManagerController(ManagerController):
    """ManagerController that mirrors a PandABox's blocks and fields,
    polling the hardware client for changes."""

    def __init__(self,
                 mri,  # type: AMri
                 config_dir,  # type: AConfigDir
                 hostname="localhost",  # type: AHostname
                 port=8888,  # type: APort
                 initial_design="",  # type: AInitialDesign
                 description="",  # type: ADescription
                 use_git=True,  # type: AUseGit
                 doc_url_base=DOC_URL_BASE,  # type: ADocUrlBase
                 poll_period=0.1  # type: APollPeriod
                 ):
        # type: (...) -> None
        super(PandABlocksManagerController, self).__init__(
            mri, config_dir, initial_design, description, use_git)
        self._poll_period = poll_period
        self._doc_url_base = doc_url_base
        # {block_name: BlockData}
        self._blocks_data = {}
        # {block_name: {field_name: Part}}
        self._blocks_parts = OrderedDict()
        # src_attr -> [dest_attr]
        self._listening_attrs = {}
        # lut elements to be displayed or not
        # {fnum: {id: visible}}
        self._lut_elements = {}
        # changes left over from last time
        self.changes = OrderedDict()
        # The PandABlock client that does the comms
        self.client = PandABlocksClient(hostname, port, Queue)
        # Filled in on reset
        self._stop_queue = None
        self._poll_spawned = None

    def do_init(self):
        # start the poll loop and make block parts first to fill in our parts
        # before calling _set_block_children()
        self.start_poll_loop()
        super(PandABlocksManagerController, self).do_init()

    def start_poll_loop(self):
        # queue to listen for stop events
        if not self.client.started:
            self._stop_queue = Queue()
            # NOTE(review): this inner check can never be true inside the
            # "not started" branch — looks vestigial; confirm before removing
            if self.client.started:
                self.client.stop()
            self.client.start(self.process.spawn, socket)
        if not self._blocks_parts:
            self._make_blocks_parts()
        if self._poll_spawned is None:
            self._poll_spawned = self.process.spawn(self._poll_loop)

    def do_disable(self):
        super(PandABlocksManagerController, self).do_disable()
        self.stop_poll_loop()

    def do_reset(self):
        self.start_poll_loop()
        super(PandABlocksManagerController, self).do_reset()

    def _poll_loop(self):
        """At self.poll_period poll for changes"""
        next_poll = time.time()
        while True:
            next_poll += self._poll_period
            timeout = next_poll - time.time()
            if timeout < 0:
                timeout = 0
            try:
                # Anything on the stop queue means exit the loop
                return self._stop_queue.get(timeout=timeout)
            except TimeoutError:
                # No stop, no problem
                pass
            try:
                self.handle_changes(self.client.get_changes())
            except Exception:
                # TODO: should fault here?
                self.log.exception("Error while getting changes")

    def stop_poll_loop(self):
        if self._poll_spawned:
            # Signal the poll loop and wait for it to exit
            self._stop_queue.put(None)
            self._poll_spawned.wait()
            self._poll_spawned = None
        if self.client.started:
            self.client.stop()

    def _make_blocks_parts(self):
        # {block_name_without_number: BlockData}
        self._blocks_data = OrderedDict()
        self._blocks_parts = OrderedDict()
        for block_rootname, block_data in \
                self.client.get_blocks_data().items():
            block_names = []
            if block_data.number == 1:
                block_names.append(block_rootname)
            else:
                # Numbered instances like SEQ1, SEQ2, ...
                for i in range(block_data.number):
                    block_names.append("%s%d" % (block_rootname, i + 1))
            for block_name in block_names:
                self._blocks_data[block_name] = block_data
                self._make_parts(block_name, block_data)
        # Handle the initial set of changes to get an initial value
        self.handle_changes(self.client.get_changes())
        # Then once more to let bit_outs toggle back
        self.handle_changes(())
        assert not self.changes, "There are still changes %s" % self.changes

    def _make_child_controller(self, parts, mri):
        controller = BasicController(mri=mri)
        if mri.endswith("PCAP"):
            # PCAP gets extra ARM/DISARM action parts
            parts.append(PandABlocksActionPart(
                self.client, "*PCAP", "ARM", "Arm position capture", []))
            parts.append(PandABlocksActionPart(
                self.client, "*PCAP", "DISARM", "Disarm position capture",
                []))
        for part in parts:
            controller.add_part(part)
        return controller

    def _make_corresponding_part(self, block_name, mri):
        part = ChildPart(name=block_name, mri=mri, stateful=False)
        return part

    def _make_parts(self, block_name, block_data):
        mri = "%s:%s" % (self.mri, block_name)
        # Defer creation of parts to a block maker
        maker = PandABlocksMaker(
            self.client, block_name, block_data, self._doc_url_base)
        # Make the child controller and add it to the process
        controller = self._make_child_controller(maker.parts.values(), mri)
        self.process.add_controller(controller, timeout=5)
        # Store the parts so we can update them with the poller
        self._blocks_parts[block_name] = maker.parts
        # Make the corresponding part for us
        child_part = self._make_corresponding_part(block_name, mri)
        self.add_part(child_part)

    def _set_lut_icon(self, block_name):
        """Rewrite a LUT block's SVG icon to match its current function."""
        icon_attr = self._blocks_parts[block_name]["icon"].attr
        with open(os.path.join(SVG_DIR, "LUT.svg")) as f:
            svg_text = f.read()
        fnum = int(self.client.get_field(block_name, "FUNC.RAW"), 0)
        invis = self._get_lut_icon_elements(fnum)
        # https://stackoverflow.com/a/8998773
        ET.register_namespace('', "http://www.w3.org/2000/svg")
        root = ET.fromstring(svg_text)
        for i in invis:
            # Find the first parent which has a child with id i
            parent = root.find('.//*[@id=%r]/..' % i)
            # Find the child and remove it
            child = parent.find('./*[@id=%r]' % i)
            parent.remove(child)
        svg_text = et_to_string(root)
        icon_attr.set_value(svg_text)

    def _get_lut_icon_elements(self, fnum):
        """Return the set of SVG element ids to hide for LUT function fnum,
        building the lookup table lazily on first use."""
        if not self._lut_elements:
            # Generate the lut element table
            # Do the general case funcs
            funcs = [("AND", operator.and_), ("OR", operator.or_)]
            for func, op in funcs:
                for nargs in (2, 3, 4, 5):
                    # 2**nargs permutations
                    for permutation in range(2 ** nargs):
                        self._calc_visibility(func, op, nargs, permutation)
            # Add in special cases for NOT
            for ninp in "ABCDE":
                invis = {"AND", "OR", "LUT"}
                for inp in "ABCDE":
                    if inp != ninp:
                        invis.add(inp)
                    invis.add("not%s" % inp)
                self._lut_elements[
                    ~LUT_CONSTANTS[ninp] & (2 ** 32 - 1)] = invis
            # And catchall for LUT in 0
            invis = {"AND", "OR", "NOT"}
            for inp in "ABCDE":
                invis.add("not%s" % inp)
            self._lut_elements[0] = invis
        return self._lut_elements.get(fnum, self._lut_elements[0])

    def _calc_visibility(self, func, op, nargs, permutations):
        """Compute and store the invisible-element set for one combination
        of function, arity and input-negation bit pattern."""
        # Visibility dictionary defaults
        invis = {"AND", "OR", "LUT", "NOT"}
        invis.remove(func)
        args = []
        for i, inp in enumerate("EDCBA"):
            # xxxxx where x is 0 or 1
            # EDCBA
            negations = format(permutations, '05b')
            if (5 - i) > nargs:
                # invisible
                invis.add(inp)
                invis.add("not%s" % inp)
            else:
                # visible
                if negations[i] == "1":
                    args.append(~LUT_CONSTANTS[inp] & (2 ** 32 - 1))
                else:
                    invis.add("not%s" % inp)
                    args.append(LUT_CONSTANTS[inp])
        # Insert into table
        fnum = op(args[0], args[1])
        for a in args[2:]:
            fnum = op(fnum, a)
        self._lut_elements[fnum] = invis

    def handle_changes(self, changes):
        """Fold new field changes into self.changes and dispatch per block."""
        for k, v in changes:
            self.changes[k] = v
        block_changes = OrderedDict()
        for full_field, val in list(self.changes.items()):
            block_name, field_name = full_field.split(".", 1)
            block_changes.setdefault(block_name, []).append((
                field_name, full_field, val))
        for block_name, field_changes in block_changes.items():
            # Squash changes
            block_mri = "%s:%s" % (self.mri, block_name)
            try:
                block_controller = self.process.get_controller(block_mri)
            except ValueError:
                # No controller for this block: drop its pending changes
                self.log.debug("Block %s not known", block_name)
                for _, full_field, _ in field_changes:
                    self.changes.pop(full_field)
            else:
                with block_controller.changes_squashed:
                    self.do_field_changes(block_name, field_changes)

    def do_field_changes(self, block_name, field_changes):
        """Apply each field change; keep or drop it in self.changes based on
        whether update_attribute wants it re-processed next poll."""
        for field_name, full_field, val in field_changes:
            ret = self.update_attribute(block_name, field_name, val)
            if ret is not None:
                # bit_out toggle: keep the real value for the next pass
                self.changes[full_field] = ret
            else:
                self.changes.pop(full_field)
            # If it was LUT.FUNC then recalculate icon
            if block_name.startswith("LUT") and field_name == "FUNC":
                self._set_lut_icon(block_name)

    def update_attribute(self, block_name, field_name, value):
        """Push a hardware value into the matching attribute.

        Returns:
            The deferred value if a bit_out needs a second pass to toggle,
            otherwise None.
        """
        ret = None
        parts = self._blocks_parts[block_name]
        if field_name not in parts:
            self.log.debug("Block %s has no field %s",
                           block_name, field_name)
            return ret
        part = parts[field_name]
        attr = part.attr
        field_data = self._blocks_data[block_name].fields.get(
            field_name, None)
        if value == Exception:
            # TODO: set error
            self.log.warning("Field %s.%s in error", block_name, field_name)
            value = None
        # Cheaper than isinstance
        if attr.meta.typeid == TableMeta.typeid:
            value = part.table_from_list(value)
        elif attr.meta.typeid == BooleanMeta.typeid:
            value = bool(int(value))
            is_bit_out = field_data and field_data.field_type == "bit_out"
            if is_bit_out and attr.value is value:
                # make bit_out things toggle while changing
                ret = value
                value = not value
        else:
            value = attr.meta.validate(value)
        # Update the value of our attribute and anyone listening
        ts = TimeStamp()
        attr.set_value_alarm_ts(value, Alarm.ok, ts)
        for dest_attr in self._listening_attrs.get(attr, []):
            dest_attr.set_value_alarm_ts(value, Alarm.ok, ts)
        # if we changed the value of a mux, update the slaved values
        if field_data and field_data.field_type in ("bit_mux", "pos_mux"):
            current_part = parts[field_name + ".CURRENT"]
            current_attr = current_part.attr
            self._update_current_attr(
                current_attr, value, ts, field_data.field_type)
        # if we changed a pos_out, its SCALE or OFFSET, update its scaled
        # value
        root_field_name = field_name.split(".")[0]
        field_data = self._blocks_data[block_name].fields[root_field_name]
        if field_data.field_type == "pos_out":
            scale = parts[root_field_name + ".SCALE"].attr.value
            offset = parts[root_field_name + ".OFFSET"].attr.value
            scaled = parts[root_field_name].attr.value * scale + offset
            parts[root_field_name + ".SCALED"].attr.set_value_alarm_ts(
                scaled, Alarm.ok, ts)
        return ret

    def _update_current_attr(self, current_attr, mux_val, ts, field_type):
        """Repoint a mux's .CURRENT attribute at its newly selected source."""
        # Remove the old current_attr from all lists
        for mux_set in self._listening_attrs.values():
            try:
                mux_set.remove(current_attr)
            except KeyError:
                pass
        # add it to the list of things that need to update
        try:
            # Constant sources (e.g. ZERO/ONE) have fixed values
            val = MUX_CONSTANT_VALUES[field_type][mux_val]
        except KeyError:
            mon_block_name, mon_field_name = mux_val.split(".", 1)
            mon_parts = self._blocks_parts[mon_block_name]
            out_attr = mon_parts[mon_field_name].attr
            self._listening_attrs.setdefault(
                out_attr, set()).add(current_attr)
            # update it to the right value
            val = out_attr.value
        current_attr.set_value_alarm_ts(val, Alarm.ok, ts)
class PandABlocksManagerController(ManagerController):
    """ManagerController that mirrors the blocks of a PandABox over a socket
    client, polling for changes at 10Hz and fanning them out to child block
    controllers."""

    def __init__(self, process, parts, params):
        super(PandABlocksManagerController, self).__init__(
            process, parts, params)
        # {block_name: BlockData}
        self._blocks_data = {}
        # {block_name: {field_name: Part}}
        self._blocks_parts = OrderedDict()
        # src_attr -> [dest_attr]
        self._listening_attrs = {}
        # (block_name, src_field_name) -> [dest_field_name]
        self._scale_offset_fields = {}
        # full_src_field -> [full_dest_field]
        self._mirrored_fields = {}
        # fields that need to inherit UNITS, SCALE and OFFSET from upstream
        self._inherit_scale = {}
        self._inherit_offset = {}
        # lut elements to be displayed or not
        # {fnum: {id: visible}}
        self._lut_elements = {}
        # changes left over from last time
        self.changes = OrderedDict()
        # The PandABlock client that does the comms
        self.client = PandABlocksClient(params.hostname, params.port, Queue)
        # Filled in on reset
        self._stop_queue = None
        self._poll_spawned = None

    def do_init(self):
        # start the poll loop and make block parts first to fill in our parts
        # before calling _set_block_children()
        self.start_poll_loop()
        super(PandABlocksManagerController, self).do_init()

    def start_poll_loop(self):
        """(Re)start the client connection and the 10Hz polling loop."""
        # queue to listen for stop events
        if not self.client.started:
            self._stop_queue = Queue()
            # NOTE(review): this branch is nested under "not started", so it
            # can never fire — looks like dead code; confirm before removing
            if self.client.started:
                self.client.stop()
            from socket import socket
            if self.use_cothread:
                cothread = maybe_import_cothread()
                if cothread:
                    # Use the cothread-friendly socket when available
                    from cothread.cosocket import socket
            self.client.start(self.spawn, socket)
        if not self._blocks_parts:
            self._make_blocks_parts()
        if self._poll_spawned is None:
            self._poll_spawned = self.spawn(self._poll_loop)

    def do_disable(self):
        super(PandABlocksManagerController, self).do_disable()
        self.stop_poll_loop()

    def do_reset(self):
        self.start_poll_loop()
        super(PandABlocksManagerController, self).do_reset()

    def _poll_loop(self):
        """At 10Hz poll for changes"""
        next_poll = time.time()
        while True:
            next_poll += 0.1
            timeout = next_poll - time.time()
            if timeout < 0:
                timeout = 0
            try:
                # Anything arriving on the stop queue ends the loop
                return self._stop_queue.get(timeout=timeout)
            except TimeoutError:
                # No stop, no problem
                pass
            try:
                self.handle_changes(self.client.get_changes())
            except Exception:
                # TODO: should fault here?
                self.log.exception("Error while getting changes")

    def stop_poll_loop(self):
        """Signal the poll loop to stop, wait for it, then stop the client."""
        if self._poll_spawned:
            self._stop_queue.put(None)
            self._poll_spawned.wait()
            self._poll_spawned = None
        if self.client.started:
            self.client.stop()

    def _make_blocks_parts(self):
        """Query the PandA for its blocks and make parts/controllers for
        each numbered instance."""
        # {block_name_without_number: BlockData}
        self._blocks_data = OrderedDict()
        self._blocks_parts = OrderedDict()
        for block_rootname, block_data in self.client.get_blocks_data().items(
                ):
            block_names = []
            if block_data.number == 1:
                block_names.append(block_rootname)
            else:
                # Numbered instances are suffixed 1..number
                for i in range(block_data.number):
                    block_names.append("%s%d" % (block_rootname, i + 1))
            for block_name in block_names:
                self._blocks_data[block_name] = block_data
                self._make_parts(block_name, block_data)
        # Handle the initial set of changes to get an initial value
        self.handle_changes(self.client.get_changes())
        # Then once more to let bit_outs toggle back
        self.handle_changes({})
        assert not self.changes, "There are still changes %s" % self.changes

    def _make_child_controller(self, parts, mri):
        """Make the child controller hosting *parts* under *mri*."""
        controller = call_with_params(BasicController, self.process, parts,
                                      mri=mri)
        return controller

    def _make_corresponding_part(self, block_name, mri):
        """Make the ChildPart that represents a child block on us."""
        part = call_with_params(ChildPart, name=block_name, mri=mri)
        return part

    def _make_parts(self, block_name, block_data):
        """Create the child controller and parts for one named block and wire
        up SCALE/OFFSET/UNITS inheritance for its pos fields."""
        mri = "%s:%s" % (self.params.mri, block_name)
        # Defer creation of parts to a block maker
        maker = PandABlocksMaker(self.client, block_name, block_data)
        # Make the child controller and add it to the process
        controller = self._make_child_controller(maker.parts.values(), mri)
        self.process.add_controller(mri, controller)
        # Store the parts so we can update them with the poller
        self._blocks_parts[block_name] = maker.parts
        # setup param pos on a block with pos_out to inherit SCALE OFFSET UNITS
        pos_fields = []
        pos_out_fields = []
        pos_mux_inp_fields = []
        for field_name, field_data in block_data.fields.items():
            if field_name == "INP" and field_data.field_type == "pos_mux":
                pos_mux_inp_fields.append(field_name)
            elif field_data.field_type == "pos_out":
                pos_out_fields.append(field_name)
            elif field_data.field_subtype in ("pos", "relative_pos"):
                pos_fields.append(field_name)
        # Make sure pos_fields can get SCALE from somewhere
        if pos_fields:
            sources = pos_mux_inp_fields + pos_out_fields
            assert len(sources) == 1, \
                "Expected one source of SCALE and OFFSET for %s, got %s" % (
                    pos_fields, sources)
            for field_name in pos_fields:
                self._map_scale_offset(block_name, sources[0], field_name)
        # Make the corresponding part for us
        child_part = self._make_corresponding_part(block_name, mri)
        self.add_part(child_part)

    def _map_scale_offset(self, block_name, src_field, dest_field):
        """Record that dest_field mirrors SCALE/OFFSET/UNITS from src_field."""
        self._scale_offset_fields.setdefault(
            (block_name, src_field), []).append(dest_field)
        if src_field == "INP":
            # mapping based on what it is connected to, defer
            return
        for suff in ("SCALE", "OFFSET", "UNITS"):
            full_src_field = "%s.%s.%s" % (block_name, src_field, suff)
            full_dest_field = "%s.%s.%s" % (block_name, dest_field, suff)
            self._mirrored_fields.setdefault(full_src_field, []).append(
                full_dest_field)

    def _set_lut_icon(self, block_name):
        """Regenerate a LUT block's SVG icon, hiding the elements that the
        current FUNC value does not use."""
        icon_attr = self._blocks_parts[block_name]["icon"].attr
        with open(os.path.join(SVG_DIR, "LUT.svg")) as f:
            svg_text = f.read()
        fnum = int(self.client.get_field(block_name, "FUNC.RAW"))
        invis = self._get_lut_icon_elements(fnum)
        root = ET.fromstring(svg_text)
        for i in invis:
            # Find the first parent which has a child with id i
            parent = root.find('.//*[@id=%r]/..' % i)
            # Find the child and remove it
            child = parent.find('./*[@id=%r]' % i)
            parent.remove(child)
        svg_text = et_to_string(root)
        icon_attr.set_value(svg_text)

    def _get_lut_icon_elements(self, fnum):
        """Return the set of SVG element ids to hide for truth-table *fnum*,
        building the lookup table lazily on first use."""
        if not self._lut_elements:
            # Generate the lut element table
            # Do the general case funcs
            funcs = [("AND", operator.and_), ("OR", operator.or_)]
            for func, op in funcs:
                for nargs in (2, 3, 4, 5):
                    # 2**nargs permutations
                    for permutation in range(2 ** nargs):
                        self._calc_visibility(func, op, nargs, permutation)
            # Add in special cases for NOT
            for ninp in "ABCDE":
                invis = {"AND", "OR", "LUT"}
                for inp in "ABCDE":
                    if inp != ninp:
                        invis.add(inp)
                    invis.add("not%s" % inp)
                self._lut_elements[~LUT_CONSTANTS[ninp] & (2 ** 32 - 1)] = \
                    invis
            # And catchall for LUT in 0
            invis = {"AND", "OR", "NOT"}
            for inp in "ABCDE":
                invis.add("not%s" % inp)
            self._lut_elements[0] = invis
        return self._lut_elements.get(fnum, self._lut_elements[0])

    def _calc_visibility(self, func, op, nargs, permutations):
        """Record which icon elements are invisible for one (func, nargs,
        negation-permutation) combination, keyed by its truth-table number."""
        # Visibility dictionary defaults
        invis = {"AND", "OR", "LUT", "NOT"}
        invis.remove(func)
        args = []
        for i, inp in enumerate("EDCBA"):
            # xxxxx where x is 0 or 1
            # EDCBA
            negations = format(permutations, '05b')
            if (5 - i) > nargs:
                # invisible
                invis.add(inp)
                invis.add("not%s" % inp)
            else:
                # visible
                if negations[i] == "1":
                    args.append(~LUT_CONSTANTS[inp] & (2 ** 32 - 1))
                else:
                    invis.add("not%s" % inp)
                    args.append(LUT_CONSTANTS[inp])
        # Insert into table
        fnum = op(args[0], args[1])
        for a in args[2:]:
            fnum = op(fnum, a)
        self._lut_elements[fnum] = invis

    def handle_changes(self, changes):
        """Merge *changes* (dict of full_field -> val) into self.changes and
        apply each one, firing mirrored-field writes as needed."""
        for k, v in changes.items():
            self.changes[k] = v
        for full_field, val in list(self.changes.items()):
            # If we have a mirrored field then fire off a request
            for dest_field in self._mirrored_fields.get(full_field, []):
                self.client.send("%s=%s\n" % (dest_field, val))
            block_name, field_name = full_field.split(".", 1)
            ret = self.update_attribute(block_name, field_name, val)
            if ret is not None:
                # Keep the change for next poll (bit_out toggling back)
                self.changes[full_field] = ret
            else:
                self.changes.pop(full_field)
            # If it was LUT.FUNC then recalculate icon
            if block_name.startswith("LUT") and field_name == "FUNC":
                self._set_lut_icon(block_name)

    def update_attribute(self, block_name, field_name, val):
        """Push a new raw *val* onto the attribute for block.field.

        Returns:
            The original value if it was a bit_out that needs toggling back
            on the next poll, otherwise None.
        """
        ret = None
        if block_name not in self._blocks_parts:
            self.log.debug("Block %s not known", block_name)
            return
        parts = self._blocks_parts[block_name]
        if field_name not in parts:
            self.log.debug("Block %s has no field %s", block_name, field_name)
            return
        part = parts[field_name]
        attr = part.attr
        field_data = self._blocks_data[block_name].fields.get(field_name, None)
        if val == Exception:
            # TODO: set error
            # NOTE(review): the client appears to signal a read error by
            # sending the Exception class itself — confirm against the client
            val = None
        elif isinstance(attr.meta, BooleanMeta):
            val = bool(int(val))
            is_bit_out = field_data and field_data.field_type == "bit_out"
            if is_bit_out and val == attr.value:
                # make bit_out things toggle while changing
                ret = val
                val = not val
        elif isinstance(attr.meta, TableMeta):
            val = part.table_from_list(val)
        # Update the value of our attribute and anyone listening
        attr.set_value(val)
        for dest_attr in self._listening_attrs.get(attr, []):
            dest_attr.set_value(val)
        # if we changed the value of a mux, update the slaved values
        if field_data and field_data.field_type in ("bit_mux", "pos_mux"):
            current_part = parts[field_name + ".CURRENT"]
            current_attr = current_part.attr
            self._update_current_attr(current_attr, val)
            if field_data.field_type == "pos_mux" and field_name == "INP":
                # all param pos fields should inherit scale and offset
                for dest_field_name in self._scale_offset_fields.get(
                        (block_name, field_name), []):
                    self._update_scale_offset_mapping(
                        block_name, dest_field_name, val)
        return ret

    def _update_scale_offset_mapping(self, block_name, field_name, mux_val):
        """Re-point field_name's SCALE/OFFSET/UNITS mirroring at the source
        named by *mux_val* and push the current values to the PandA."""
        # Find the fields that depend on this input
        field_data = self._blocks_data[block_name].fields.get(field_name, None)
        if field_data.field_subtype == "relative_pos":
            # relative_pos has no OFFSET to inherit
            suffs = ("SCALE", "UNITS")
        else:
            suffs = ("SCALE", "OFFSET", "UNITS")
        for suff in suffs:
            full_src_field = "%s.%s" % (mux_val, suff)
            full_dest_field = "%s.%s.%s" % (block_name, field_name, suff)
            # Remove mirrored fields that are already in lists
            for field_list in self._mirrored_fields.values():
                try:
                    field_list.remove(full_dest_field)
                except ValueError:
                    pass
            self._mirrored_fields.setdefault(full_src_field, []).append(
                full_dest_field)
            # update it to the right value
            if mux_val == "ZERO":
                value = dict(SCALE=1, OFFSET=0, UNITS="")[suff]
            else:
                mon_block_name, mon_field_name = mux_val.split(".", 1)
                mon_parts = self._blocks_parts[mon_block_name]
                src_attr = mon_parts["%s.%s" % (mon_field_name, suff)].attr
                value = src_attr.value
            self.client.send("%s=%s\n" % (full_dest_field, value))

    def _update_current_attr(self, current_attr, mux_val):
        """Point *current_attr* at the value named by *mux_val* (ZERO/ONE or
        a "BLOCK.FIELD" source) and keep it updated."""
        # Remove the old current_attr from all lists
        for mux_list in self._listening_attrs.values():
            try:
                mux_list.remove(current_attr)
            except ValueError:
                pass
        # add it to the list of things that need to update
        if mux_val == "ZERO":
            current_attr.set_value(0)
        elif mux_val == "ONE":
            current_attr.set_value(1)
        else:
            mon_block_name, mon_field_name = mux_val.split(".", 1)
            mon_parts = self._blocks_parts[mon_block_name]
            out_attr = mon_parts[mon_field_name].attr
            self._listening_attrs.setdefault(
                out_attr, []).append(current_attr)
            # update it to the right value
            current_attr.set_value(out_attr.value)
class Controller(Loggable):
    """Owns a BlockModel, its Parts and the lock/notifier machinery, and
    services Get/Put/Post/Subscribe/Unsubscribe requests against it."""

    use_cothread = True

    # Attributes
    # health: AttributeModel showing "OK" or the worst current fault message
    health = None

    def __init__(self, process, mri, parts, description=""):
        super(Controller, self).__init__(mri=mri)
        self.process = process
        self.mri = mri
        self._request_queue = Queue()
        # {Part: Alarm} for current faults
        self._faults = {}
        # {Hook: name}
        self._hook_names = {}
        # {Hook: {Part: func_name}}
        self._hooked_func_names = {}
        self._find_hooks()
        # {part_name: (field_name, Model, setter)
        self.part_fields = OrderedDict()
        # {name: Part}
        self.parts = OrderedDict()
        self._lock = RLock(self.use_cothread)
        self._block = BlockModel()
        self._block.meta.set_description(description)
        self.set_label(mri)
        for part in parts:
            self.add_part(part)
        self._notifier = Notifier(mri, self._lock, self._block)
        self._block.set_notifier_path(self._notifier, [mri])
        # {field_name: writeable_func} used to service Put/Post
        self._write_functions = {}
        self._add_block_fields()

    def set_label(self, label):
        """Set the label of the Block Meta object"""
        self._block.meta.set_label(label)

    def add_part(self, part):
        """Attach *part* to us, validating its hooks and collecting the
        fields it contributes."""
        assert part.name not in self.parts, \
            "Part %r already exists in Controller %r" % (part.name, self.mri)
        part.attach_to_controller(self)
        # Check part hooks into one of our hooks
        for func_name, part_hook, _ in get_hook_decorated(part):
            assert part_hook in self._hook_names, \
                "Part %s func %s not hooked into %s" % (
                    part.name, func_name, self)
            self._hooked_func_names[part_hook][part] = func_name
        part_fields = list(part.create_attribute_models()) + \
            list(part.create_method_models())
        self.parts[part.name] = part
        self.part_fields[part.name] = part_fields

    def _find_hooks(self):
        """Find Hook class attributes on us and index them by name."""
        for name, member in inspect.getmembers(self, Hook.isinstance):
            assert member not in self._hook_names, \
                "Hook %s already in %s as %s" % (
                    self, name, self._hook_names[member])
            self._hook_names[member] = name
            self._hooked_func_names[member] = {}

    def _add_block_fields(self):
        """Populate the BlockModel with our own fields plus those from all
        the parts."""
        for iterable in (self.create_attribute_models(),
                         self.create_method_models(),
                         self.initial_part_fields()):
            for name, child, writeable_func in iterable:
                self.add_block_field(name, child, writeable_func)

    def add_block_field(self, name, child, writeable_func):
        """Add one AttributeModel or MethodModel to the Block, recording its
        write function if it is writeable."""
        if writeable_func:
            self._write_functions[name] = writeable_func
        if isinstance(child, AttributeModel):
            if writeable_func:
                child.meta.set_writeable(True)
            if not child.meta.label:
                child.meta.set_label(camel_to_title(name))
        elif isinstance(child, MethodModel):
            if writeable_func:
                child.set_writeable(True)
                for k, v in child.takes.elements.items():
                    v.set_writeable(True)
            if not child.label:
                child.set_label(camel_to_title(name))
        else:
            raise ValueError("Invalid block field %r" % child)
        self._block.set_endpoint_data(name, child)

    def create_method_models(self):
        """Provide MethodModel instances to be attached to BlockModel

        Yields:
            tuple: (string name, MethodModel, callable post_function).
        """
        return get_method_decorated(self)

    def create_attribute_models(self):
        """Provide AttributeModel instances to be attached to BlockModel

        Yields:
            tuple: (string name, AttributeModel, callable put_function).
        """
        # Create read-only attribute to show error texts
        meta = HealthMeta("Displays OK or an error message")
        self.health = meta.create_attribute_model()
        yield "health", self.health, None

    def initial_part_fields(self):
        """Yield (name, Model, writeable_func) for every field every part
        contributed at construction time."""
        for part_fields in self.part_fields.values():
            for data in part_fields:
                yield data

    def spawn(self, func, *args, **kwargs):
        """Spawn a function in the right thread"""
        spawned = self.process.spawn(func, args, kwargs, self.use_cothread)
        return spawned

    @property
    @contextmanager
    def lock_released(self):
        """Context manager that temporarily releases our lock, e.g. while
        calling user write functions."""
        self._lock.release()
        try:
            yield
        finally:
            self._lock.acquire()

    @property
    def changes_squashed(self):
        """Context manager that batches change notifications into one."""
        return self._notifier.changes_squashed

    def update_health(self, part, alarm=None):
        """Set the health attribute. Called from part"""
        if alarm is not None:
            alarm = deserialize_object(alarm, Alarm)
        with self.changes_squashed:
            if alarm is None or not alarm.severity:
                self._faults.pop(part, None)
            else:
                self._faults[part] = alarm
            if self._faults:
                # Sort them by severity
                faults = sorted(self._faults.values(),
                                key=lambda a: a.severity)
                alarm = faults[-1]
                text = faults[-1].message
            else:
                alarm = None
                text = "OK"
            self.health.set_value(text, alarm=alarm)

    def block_view(self):
        """Get a view of the block we control

        Returns:
            Block: The block we control
        """
        context = Context(self.process)
        return self.make_view(context)

    def make_view(self, context, data=None, child_name=None):
        """Make a child View of data[child_name]"""
        try:
            return self._make_view(context, data, child_name)
        except WrongThreadError:
            # called from wrong thread, spawn it again
            result = self.spawn(self._make_view, context, data, child_name)
            return result.get()

    def _make_view(self, context, data, child_name):
        """Called in cothread's thread"""
        with self._lock:
            if data is None:
                child = self._block
            else:
                child = data[child_name]
            child_view = self._make_appropriate_view(context, child)
        return child_view

    def _make_appropriate_view(self, context, data):
        """Wrap *data* in the right View type, recursing into containers."""
        if isinstance(data, BlockModel):
            # Make an Block View
            return make_block_view(self, context, data)
        elif isinstance(data, AttributeModel):
            # Make an Attribute View
            return Attribute(self, context, data)
        elif isinstance(data, MethodModel):
            # Make a Method View
            return Method(self, context, data)
        elif isinstance(data, Model):
            # Make a generic View of it
            return make_view(self, context, data)
        elif isinstance(data, dict):
            # Need to recurse down
            d = OrderedDict()
            for k, v in data.items():
                d[k] = self._make_appropriate_view(context, v)
            return d
        elif isinstance(data, list):
            # Need to recurse down
            return [self._make_appropriate_view(context, x) for x in data]
        else:
            return data

    def handle_request(self, request):
        """Spawn a new thread that handles Request"""
        # Put data on the queue, so if spawns are handled out of order we
        # still get the most up to date data
        self._request_queue.put(request)
        return self.spawn(self._handle_request)

    def _handle_request(self):
        """Pop one request off the queue, dispatch it to the right handler
        and deliver the responses."""
        responses = []
        with self._lock:
            # We spawned just above, so there is definitely something on the
            # queue
            request = self._request_queue.get(timeout=0)
            # self.log.debug(request)
            if isinstance(request, Get):
                handler = self._handle_get
            elif isinstance(request, Put):
                handler = self._handle_put
            elif isinstance(request, Post):
                handler = self._handle_post
            elif isinstance(request, Subscribe):
                handler = self._notifier.handle_subscribe
            elif isinstance(request, Unsubscribe):
                handler = self._notifier.handle_unsubscribe
            else:
                raise UnexpectedError("Unexpected request %s", request)
            try:
                responses += handler(request)
            except Exception as e:
                responses.append(request.error_response(e))
        for cb, response in responses:
            try:
                cb(response)
            except Exception as e:
                self.log.exception("Exception notifying %s", response)
                raise

    def _handle_get(self, request):
        """Called with the lock taken"""
        data = self._block
        for endpoint in request.path[1:]:
            try:
                data = data[endpoint]
            except KeyError:
                if hasattr(data, "typeid"):
                    typ = data.typeid
                else:
                    typ = type(data)
                raise UnexpectedError(
                    "Object of type %r has no attribute %r" % (typ, endpoint))
        serialized = serialize_object(data)
        ret = [request.return_response(serialized)]
        return ret

    def _handle_put(self, request):
        """Called with the lock taken"""
        attribute_name = request.path[1]
        attribute = self._block[attribute_name]
        assert attribute.meta.writeable, \
            "Attribute %s is not writeable" % attribute_name
        put_function = self._write_functions[attribute_name]
        # Release the lock while the user put function runs
        with self.lock_released:
            result = put_function(request.value)
        ret = [request.return_response(result)]
        return ret

    def _handle_post(self, request):
        """Called with the lock taken"""
        method_name = request.path[1]
        if request.parameters:
            param_dict = request.parameters
        else:
            param_dict = {}
        method = self._block[method_name]
        assert method.writeable, \
            "Method %s is not writeable" % method_name
        args = method.prepare_call_args(**param_dict)
        post_function = self._write_functions[method_name]
        # Release the lock while the user post function runs
        with self.lock_released:
            result = post_function(*args)
        result = self.validate_result(method_name, result)
        ret = [request.return_response(result)]
        return ret

    def validate_result(self, method_name, result):
        """Validate *result* against the method's declared returns."""
        with self._lock:
            method = self._block[method_name]
            # Prepare output map
            if method.returns.elements:
                result = Map(method.returns, result)
                result.check_valid()
        return result

    def create_part_contexts(self):
        """Make a fresh Context for each of our parts."""
        part_contexts = {}
        for part_name, part in self.parts.items():
            part_contexts[part] = Context(self.process)
        return part_contexts

    def run_hook(self, hook, part_contexts, *args, **params):
        """Start *hook* on all hooked parts and wait for the results."""
        hook_queue, hook_runners = self.start_hook(
            hook, part_contexts, *args, **params)
        return_dict = self.wait_hook(hook_queue, hook_runners)
        return return_dict

    def start_hook(self, hook, part_contexts, *args, **params):
        """Kick off the hooked function of every part for *hook*.

        Returns:
            tuple: (queue of (part, result), {part: hook_runner})
        """
        assert hook in self._hook_names, \
            "Hook %s doesn't appear in controller hooks %s" % (
                hook, self._hook_names)
        hook_name = self._hook_names[hook]
        self.log.debug("%s: Starting hook", hook_name)
        # This queue will hold (part, result) tuples
        hook_queue = Queue()
        hook_queue.hook_name = hook_name
        hook_runners = {}
        # now start them off
        # Take the lock so that no hook abort can come in between now and
        # the spawn of the context
        with self._lock:
            for part, context in part_contexts.items():
                # context might have been aborted but have nothing servicing
                # the queue, we still want the legitimate messages on the queue
                # so just tell it to ignore stops it got before now
                context.ignore_stops_before_now()
                func_name = self._hooked_func_names[hook].get(part, None)
                if func_name:
                    hook_runners[part] = part.make_hook_runner(
                        hook_queue, func_name, weakref.proxy(context), *args,
                        **params)
        return hook_queue, hook_runners

    def wait_hook(self, hook_queue, hook_runners):
        """Collect results from all hook runners, aborting the rest if one
        returns an exception. Returns {part_name: result}."""
        # Wait for them all to finish
        return_dict = {}
        start = time.time()
        while hook_runners:
            part, ret = hook_queue.get()
            hook_runner = hook_runners.pop(part)
            # Wait for the process to terminate
            hook_runner.wait()
            return_dict[part.name] = ret
            duration = time.time() - start
            if hook_runners:
                self.log.debug(
                    "%s: Part %s returned %r after %ss. Still waiting for %s",
                    hook_queue.hook_name, part.name, ret, duration,
                    [p.name for p in hook_runners])
            else:
                self.log.debug(
                    "%s: Part %s returned %r after %ss. Returning...",
                    hook_queue.hook_name, part.name, ret, duration)
            if isinstance(ret, Exception):
                if not isinstance(ret, AbortedError):
                    # If AbortedError, all tasks have already been stopped.
                    # Got an error, so stop and wait all hook runners
                    for h in hook_runners.values():
                        h.stop()
                # Wait for them to finish
                for h in hook_runners.values():
                    h.wait(timeout=ABORT_TIMEOUT)
                raise ret
        return return_dict
def set_parameters(self, parameters): if parameters is not None: parameters = OrderedDict( (deserialize_object(k, str_), serialize_object(v)) for k, v in parameters.items()) self.set_endpoint_data("parameters", parameters)
class Process(Loggable):
    """Hosts a number of Blocks, distributing requests between them"""

    def __init__(self, name, sync_factory):
        self.set_logger_name(name)
        self.name = name
        self.sync_factory = sync_factory
        # The central request queue serviced by recv_loop
        self.q = self.create_queue()
        self._blocks = OrderedDict()  # block_name -> Block
        self._controllers = OrderedDict()  # block_name -> Controller
        self._block_state_cache = Cache()
        self._recv_spawned = None
        self._other_spawned = []
        # lookup of all Subscribe requests, ordered to guarantee subscription
        # notification ordering
        # {Request.generate_key(): Subscribe}
        self._subscriptions = OrderedDict()
        self.comms = []
        self._client_comms = OrderedDict()  # client comms -> list of blocks
        # Dispatch table: request type -> handler method
        self._handle_functions = {
            Post: self._forward_block_request,
            Put: self._forward_block_request,
            Get: self._handle_get,
            Subscribe: self._handle_subscribe,
            Unsubscribe: self._handle_unsubscribe,
            BlockChanges: self._handle_block_changes,
            BlockRespond: self._handle_block_respond,
            BlockAdd: self._handle_block_add,
            BlockList: self._handle_block_list,
            AddSpawned: self._add_spawned,
        }
        self.create_process_block()

    def recv_loop(self):
        """Service self.q, distributing the requests to the right block"""
        while True:
            request = self.q.get()
            self.log_debug("Received request %s", request)
            if request is PROCESS_STOP:
                # Got the sentinel, stop immediately
                break
            try:
                self._handle_functions[type(request)](request)
            except Exception as e:  # pylint:disable=broad-except
                self.log_exception("Exception while handling %s", request)
                try:
                    request.respond_with_error(str(e))
                except Exception:
                    # Best effort: the requester may not support errors
                    pass

    def add_comms(self, comms):
        """Register a comms object; only legal before start()."""
        assert not self._recv_spawned, \
            "Can't add comms when process has been started"
        self.comms.append(comms)

    def start(self):
        """Start the process going"""
        self._recv_spawned = self.sync_factory.spawn(self.recv_loop)
        for comms in self.comms:
            comms.start()

    def stop(self, timeout=None):
        """Stop the process and wait for it to finish

        Args:
            timeout (float): Maximum amount of time to wait for each spawned
                process. None means forever
        """
        assert self._recv_spawned, "Process not started"
        self.q.put(PROCESS_STOP)
        for comms in self.comms:
            comms.stop()
        # Wait for recv_loop to complete first
        self._recv_spawned.wait(timeout=timeout)
        # Now wait for anything it spawned to complete
        for s, _ in self._other_spawned:
            s.wait(timeout=timeout)
        # Garbage collect the syncfactory
        del self.sync_factory

    def _forward_block_request(self, request):
        """Lookup target Block and spawn block.handle_request(request)

        Args:
            request (Request): The message that should be passed to the Block
        """
        block_name = request.endpoint[0]
        block = self._blocks[block_name]
        spawned = self.sync_factory.spawn(block.handle_request, request)
        self._add_spawned(AddSpawned(spawned, block.handle_request))

    def create_queue(self):
        """Create a queue using sync_factory object

        Returns:
            Queue: New queue
        """
        return self.sync_factory.create_queue()

    def create_lock(self):
        """Create a lock using sync_factory object

        Returns:
            New lock object
        """
        return self.sync_factory.create_lock()

    def spawn(self, function, *args, **kwargs):
        """Calls SyncFactory.spawn()"""
        def catching_function():
            try:
                function(*args, **kwargs)
            except Exception:
                self.log_exception("Exception calling %s(*%s, **%s)",
                                   function, args, kwargs)
                raise
        spawned = self.sync_factory.spawn(catching_function)
        # Register the spawned object via the queue so bookkeeping happens
        # in the Process thread
        request = AddSpawned(spawned, function)
        self.q.put(request)
        return spawned

    def _add_spawned(self, request):
        spawned = self._other_spawned
        self._other_spawned = []
        spawned.append((request.spawned, request.function))
        # Filter out the spawned that have completed to stop memory leaks
        for sp, f in spawned:
            if not sp.ready():
                self._other_spawned.append((sp, f))

    def get_client_comms(self, block_name):
        """Return the client comms hosting *block_name*, or None."""
        for client_comms, blocks in list(self._client_comms.items()):
            if block_name in blocks:
                return client_comms

    def create_process_block(self):
        """Make the Block that describes this Process itself."""
        self.process_block = Block()
        # TODO: add a meta here
        children = OrderedDict()
        children["blocks"] = StringArrayMeta(
            description="Blocks hosted by this Process").make_attribute([])
        children["remoteBlocks"] = StringArrayMeta(
            description="Blocks reachable via ClientComms").make_attribute([])
        self.process_block.replace_endpoints(children)
        self.process_block.set_process_path(self, [self.name])
        self.add_block(self.process_block, self)

    def update_block_list(self, client_comms, blocks):
        """Queue an update of the blocks a client comms can reach."""
        self.q.put(BlockList(client_comms=client_comms, blocks=blocks))

    def _handle_block_list(self, request):
        self._client_comms[request.client_comms] = request.blocks
        remotes = []
        for blocks in self._client_comms.values():
            remotes += [b for b in blocks if b not in remotes]
        self.process_block["remoteBlocks"].set_value(remotes)

    def _handle_block_changes(self, request):
        """Update subscribers with changes and applies stored changes to the
        cached structure"""
        # update cached dict
        subscription_changes = self._block_state_cache.apply_changes(
            *request.changes)
        # Send out the changes
        for subscription, changes in subscription_changes.items():
            if subscription.delta:
                # respond with the filtered changes
                subscription.respond_with_delta(changes)
            else:
                # respond with the structure of everything
                # below the endpoint
                d = self._block_state_cache.walk_path(subscription.endpoint)
                subscription.respond_with_update(d)

    def report_changes(self, *changes):
        """Queue structure changes for distribution to subscribers."""
        self.q.put(BlockChanges(changes=list(changes)))

    def block_respond(self, response, response_queue):
        """Queue a response for delivery on *response_queue*."""
        self.q.put(BlockRespond(response, response_queue))

    def _handle_block_respond(self, request):
        """Push the response to the required queue"""
        request.response_queue.put(request.response)

    def add_block(self, block, controller):
        """Add a block to be hosted by this process

        Args:
            block (Block): The block to be added
            controller (Controller): Its controller
        """
        path = block.process_path
        assert len(path) == 1, \
            "Expected block %r to have %r as parent, got path %r" % \
            (block, self, path)
        name = path[0]
        assert name not in self._blocks, \
            "There is already a block called %r" % name
        request = BlockAdd(block=block, controller=controller, name=name)
        if self._recv_spawned:
            # Started, so call in Process thread
            self.q.put(request)
        else:
            # Not started yet so we are safe to add in this thread
            self._handle_block_add(request)

    def _handle_block_add(self, request):
        """Add a block to be hosted by this process"""
        assert request.name not in self._blocks, \
            "There is already a block called %r" % request.name
        self._blocks[request.name] = request.block
        self._controllers[request.name] = request.controller
        serialized = request.block.to_dict()
        # Seed the state cache with the new block's structure
        change_request = BlockChanges([[[request.name], serialized]])
        self._handle_block_changes(change_request)
        # Regenerate list of blocks
        self.process_block["blocks"].set_value(list(self._blocks))

    def get_block(self, block_name):
        """Return the named local block, making a client block for a known
        remote name. Raises KeyError if unknown."""
        try:
            return self._blocks[block_name]
        except KeyError:
            if block_name in self.process_block.remoteBlocks:
                return self.make_client_block(block_name)
            else:
                raise

    def make_client_block(self, block_name):
        """Make a ClientController-backed block for a remote block name."""
        params = ClientController.MethodMeta.prepare_input_map(
            mri=block_name)
        controller = ClientController(self, {}, params)
        return controller.block

    def get_controller(self, block_name):
        """Return the Controller for a named block."""
        return self._controllers[block_name]

    def _handle_subscribe(self, request):
        """Add a new subscriber and respond with the current sub-structure
        state"""
        key = request.generate_key()
        assert key not in self._subscriptions, \
            "Subscription on %s already exists" % (key,)
        self._subscriptions[key] = request
        self._block_state_cache.add_subscriber(request, request.endpoint)
        d = self._block_state_cache.walk_path(request.endpoint)
        self.log_debug("Initial subscription value %s", d)
        if request.delta:
            request.respond_with_delta([[[], d]])
        else:
            request.respond_with_update(d)

    def _handle_unsubscribe(self, request):
        """Remove a subscriber and respond with success or error"""
        key = request.generate_key()
        try:
            subscription = self._subscriptions.pop(key)
        except KeyError:
            request.respond_with_error(
                "No subscription found for %s" % (key, ))
        else:
            self._block_state_cache.remove_subscriber(
                subscription, subscription.endpoint)
            request.respond_with_return()

    def _handle_get(self, request):
        """Respond with the cached structure below the request endpoint."""
        d = self._block_state_cache.walk_path(request.endpoint)
        request.respond_with_return(d)
def set_parameters(self, parameters): if parameters is not None: parameters = OrderedDict( (deserialize_object(k, str_), serialize_object(v)) for k, v in parameters.items()) self.set_endpoint_data("parameters", parameters)
class PandATablePart(PandAFieldPart):
    """This will normally be instantiated by the PandABox assembly, not created
    in yaml"""

    def __init__(
        self,
        client: AClient,
        meta: AMeta,
        block_name: ABlockName,
        field_name: AFieldName,
    ) -> None:
        # Fill in the meta object with the correct headers
        columns = OrderedDict()
        # column_name -> TableFieldData giving that column's bit layout
        self.field_data = OrderedDict()
        fields = client.get_table_fields(block_name, field_name)
        if not fields:
            # Didn't put any metadata in, make some up
            fields["VALUE"] = TableFieldData(31, 0, "The Value", None, True)
        for column_name, field_data in fields.items():
            nbits = field_data.bits_hi - field_data.bits_lo + 1
            if nbits < 1:
                raise ValueError("Bad bits in %s" % (field_data,))
            if field_data.labels:
                # Labelled field -> present as a choice column
                column_meta = ChoiceArrayMeta(choices=field_data.labels)
                widget = Widget.COMBO
            elif nbits == 1:
                # Single bit -> boolean checkbox column
                column_meta = BooleanArrayMeta()
                widget = Widget.CHECKBOX
            else:
                # Multi-bit numeric column with the narrowest suitable dtype
                dtype = get_dtype(nbits, field_data.signed)
                column_meta = NumberArrayMeta(dtype)
                widget = Widget.TEXTINPUT
            column_name = snake_to_camel(column_name)
            column_meta.set_label(camel_to_title(column_name))
            column_meta.set_tags([widget.tag()])
            column_meta.set_description(field_data.description)
            column_meta.set_writeable(True)
            columns[column_name] = column_meta
            self.field_data[column_name] = field_data
        meta.set_elements(columns)
        # Work out how many ints per row
        # TODO: this should be in the block data
        max_bits_hi = max(f.bits_hi for f in self.field_data.values())
        # Integer ceiling division: avoids the float round-trip of
        # int((x + 31) / 32)
        self.ints_per_row = (max_bits_hi + 31) // 32
        # Superclass will make the attribute for us
        super().__init__(client, meta, block_name, field_name)

    def handle_change(self, value: str, ts: TimeStamp) -> None:
        """Unpack the PandA's list of ints into a table and publish it"""
        value = self.table_from_list(value)
        self.attr.set_value_alarm_ts(value, Alarm.ok, ts)

    def set_field(self, value):
        """Pack the table *value* and write it to the PandA field"""
        int_values = self.list_from_table(value)
        self.client.set_table(self.block_name, self.field_name, int_values)

    def list_from_table(self, table):
        """Pack *table* into the flat list of uint32 the PandA expects"""
        # Create a bit array we can contribute to
        nrows = len(table[list(self.field_data)[0]])
        int_matrix = np.zeros((nrows, self.ints_per_row), dtype=np.uint32)
        # For each row, or the right bits of the int values
        for column_name, field_data in self.field_data.items():
            column_value = table[column_name]
            if field_data.labels:
                # Choice, lookup indexes of the label values
                indexes = [field_data.labels.index(v) for v in column_value]
                column_value = np.array(indexes, dtype=np.uint32)
            else:
                # Array, unwrap to get the numpy array
                column_value = column_value.seq
            # Left shift the value so it is aligned with the int columns
            _, mask = get_nbits_mask(field_data)
            shifted_column = (column_value & mask) << field_data.bits_lo % 32
            # Or it with what we currently have
            column_index = get_column_index(field_data)
            int_matrix[..., column_index] |= shifted_column.astype(np.uint32)
        # Flatten it to a list of uints
        int_values = int_matrix.reshape((nrows * self.ints_per_row,))
        return int_values

    def table_from_list(self, int_values):
        """Unpack a flat list of uint32 from the PandA into a table"""
        columns = {}
        nrows = len(int_values) // self.ints_per_row
        # Convert to a 1D uint32 array
        u32 = np.array([int(x) for x in int_values], dtype=np.uint32)
        # Reshape to a 2D array
        int_matrix = u32.reshape((nrows, self.ints_per_row))
        # Create the data for each column
        for column_name, field_data in self.field_data.items():
            # Find the right int to operate on
            column_index = get_column_index(field_data)
            int_column = int_matrix[..., column_index]
            # Right shift data, and mask it
            nbits, mask = get_nbits_mask(field_data)
            shifted_column = (int_column >> field_data.bits_lo % 32) & mask
            # If we wanted labels, convert to values here
            if field_data.labels:
                column_value = [field_data.labels[i] for i in shifted_column]
            elif nbits == 1:
                # np.bool was deprecated in NumPy 1.20 and removed in 1.24;
                # the builtin bool is the documented replacement
                column_value = shifted_column.astype(bool)
            else:
                # View as the correct type
                dtype = self.meta.elements[column_name].dtype
                column_value = shifted_column.astype(dtype)
            columns[column_name] = column_value
        # Create a table from it
        table = self.meta.validate(self.meta.table_cls(**columns))
        return table
class PandABoxPoller(Spawnable, Loggable):
    """Polls a PandA control connection for field changes and pushes them
    onto the attributes of the Blocks made for it, mirroring SCALE, OFFSET
    and UNITS between connected fields."""

    def __init__(self, process, control):
        self.set_logger_name("PandABoxPoller(%s)" % control.hostname)
        self.process = process
        self.control = control
        # block_name -> BlockData
        self._block_data = {}
        # block_name -> {field_name: Part}
        self._parts = OrderedDict()
        # src_attr -> [dest_attr]
        self._listening_attrs = {}
        # (block_name, src_field_name) -> [dest_field_name]
        self._scale_offset_fields = {}
        # full_src_field -> [full_dest_field]
        self._mirrored_fields = {}
        # changes left over from last time
        self.changes = OrderedDict()
        # fields that need to inherit UNITS, SCALE and OFFSET from upstream
        self._inherit_scale = {}
        self._inherit_offset = {}
        self.q = process.create_queue()
        # Spawn the poll loop; the stop function posts STOP onto self.q
        self.add_spawn_function(self.poll_loop,
                                self.make_default_stop_func(self.q))

    def make_panda_block(self, mri, block_name, block_data, parts=None,
                         area_detector=False):
        """Make a Block for *block_name* and register its parts so that
        subsequent polled changes update the right attributes.

        *parts* is an optional list of extra Parts to add to the block.
        Returns the created Block.
        """
        # Validate and store block_data
        self._store_block_data(block_name, block_data)
        # Defer creation of parts to a block maker
        maker = PandABoxBlockMaker(self.process, self.control, block_name,
                                   block_data, area_detector)
        # Add in any extras we are passed
        if parts:
            for part in parts:
                maker.parts[part.name] = part
        # Make a controller
        params = DefaultController.MethodMeta.prepare_input_map(mri=mri)
        controller = DefaultController(
            self.process, maker.parts.values(), params)
        block = controller.block
        self._parts[block_name] = maker.parts
        # Set the initial block_url
        self._set_icon_url(block_name)
        return block

    def _set_icon_url(self, block_name):
        """Point the block's "icon" attribute at a URL derived from the
        block type (the block name with trailing digits stripped)"""
        icon_attr = self._parts[block_name]["icon"].attr
        fname = block_name.rstrip("0123456789")
        if fname == "LUT":
            # TODO: Get fname from func
            pass
        # TODO: make relative
        url = "http://localhost:8080/path/to/%s" % fname
        icon_attr.set_value(url)

    def _store_block_data(self, block_name, block_data):
        """Record *block_data* and wire up SCALE/OFFSET/UNITS inheritance
        for its position fields"""
        self._block_data[block_name] = block_data
        # setup param pos on a block with pos_out to inherit SCALE OFFSET UNITS
        pos_fields = []
        pos_out_fields = []
        pos_mux_inp_fields = []
        for field_name, field_data in block_data.fields.items():
            if field_name == "INP" and field_data.field_type == "pos_mux":
                pos_mux_inp_fields.append(field_name)
            elif field_data.field_type == "pos_out":
                pos_out_fields.append(field_name)
            elif field_data.field_subtype in ("pos", "relative_pos"):
                pos_fields.append(field_name)
        # Make sure pos_fields can get SCALE from somewhere
        if pos_fields:
            sources = pos_mux_inp_fields + pos_out_fields
            assert len(sources) == 1, \
                "Expected one source of SCALE and OFFSET for %s, got %s" % (
                    pos_fields, sources)
            for field_name in pos_fields:
                self._map_scale_offset(block_name, sources[0], field_name)

    def _map_scale_offset(self, block_name, src_field, dest_field):
        """Make *dest_field* mirror SCALE/OFFSET/UNITS from *src_field*.

        INP sources are only registered here; their mirroring is set up
        later when we learn what the mux is connected to.
        """
        self._scale_offset_fields.setdefault(
            (block_name, src_field), []).append(dest_field)
        if src_field == "INP":
            # mapping based on what it is connected to, defer
            return
        for suff in ("SCALE", "OFFSET", "UNITS"):
            full_src_field = "%s.%s.%s" % (block_name, src_field, suff)
            full_dest_field = "%s.%s.%s" % (block_name, dest_field, suff)
            self._mirrored_fields.setdefault(full_src_field, []).append(
                full_dest_field)

    def poll_loop(self):
        """At 10Hz poll for changes"""
        next_poll = time.time()
        while True:
            next_poll += 0.1
            timeout = next_poll - time.time()
            if timeout < 0:
                timeout = 0
            try:
                # Wait for the remainder of the poll period; a STOP message
                # posted by the stop function ends the loop
                message = self.q.get(timeout=timeout)
                if message is Spawnable.STOP:
                    break
            except queue.Empty:
                # No problem
                pass
            try:
                self.handle_changes(self.control.get_changes())
            except Exception:
                # Keep polling even if one batch of changes fails
                self.log_exception("Error while getting changes")

    def handle_changes(self, changes):
        """Apply a dict of full_field -> value changes to our attributes.

        Changes that update_attribute asks to keep (non-None return) stay in
        self.changes so they are re-applied on the next poll.
        """
        for k, v in changes.items():
            self.changes[k] = v
        for full_field, val in list(self.changes.items()):
            # If we have a mirrored field then fire off a request
            for dest_field in self._mirrored_fields.get(full_field, []):
                self.control.send("%s=%s\n" % (dest_field, val))
            block_name, field_name = full_field.split(".", 1)
            ret = self.update_attribute(block_name, field_name, val)
            if ret is not None:
                self.changes[full_field] = ret
            else:
                self.changes.pop(full_field)
            # If it was LUT.FUNC then recalculate icon
            if block_name.startswith("LUT") and field_name == "FUNC":
                self._set_icon_url(block_name)

    def update_attribute(self, block_name, field_name, val):
        """Push *val* onto the attribute for block_name.field_name.

        Returns a value to keep in self.changes for re-processing next poll
        (used to complete the bit_out toggle), or None when done.
        """
        ret = None
        if block_name not in self._parts:
            self.log_debug("Block %s not known", block_name)
            return
        parts = self._parts[block_name]
        if field_name not in parts:
            self.log_debug("Block %s has no field %s", block_name, field_name)
            return
        part = parts[field_name]
        attr = part.attr
        field_data = self._block_data[block_name].fields.get(field_name, None)
        if val == Exception:
            # Exception is used as a sentinel for a read error upstream
            # TODO: set error
            val = None
        elif isinstance(attr.meta, BooleanMeta):
            val = bool(int(val))
            is_bit_out = field_data and field_data.field_type == "bit_out"
            if is_bit_out and val == attr.value:
                # make bit_out things toggle while changing: publish the
                # inverted value now and keep the real one for next poll
                ret = val
                val = not val
        elif isinstance(attr.meta, TableMeta):
            val = part.table_from_list(val)
        # Update the value of our attribute and anyone listening
        attr.set_value(val)
        for dest_attr in self._listening_attrs.get(attr, []):
            dest_attr.set_value(val)
        # if we changed the value of a mux, update the slaved values
        if field_data and field_data.field_type in ("bit_mux", "pos_mux"):
            val_part = parts[field_name + ".VAL"]
            val_attr = val_part.attr
            self._update_val_attr(val_attr, val)
            if field_data.field_type == "pos_mux" and field_name == "INP":
                # all param pos fields should inherit scale and offset
                for dest_field_name in self._scale_offset_fields.get(
                        (block_name, field_name), []):
                    self._update_scale_offset_mapping(
                        block_name, dest_field_name, val)
        return ret

    def _update_scale_offset_mapping(self, block_name, field_name, mux_val):
        """Repoint SCALE/OFFSET/UNITS mirroring of *field_name* at the
        field the mux is now connected to (*mux_val*), and push the
        current upstream values down"""
        # Find the fields that depend on this input
        field_data = self._block_data[block_name].fields.get(field_name, None)
        if field_data.field_subtype == "relative_pos":
            # relative_pos fields keep their own OFFSET
            suffs = ("SCALE", "UNITS")
        else:
            suffs = ("SCALE", "OFFSET", "UNITS")
        for suff in suffs:
            full_src_field = "%s.%s" % (mux_val, suff)
            full_dest_field = "%s.%s.%s" % (block_name, field_name, suff)
            # Remove mirrored fields that are already in lists
            for field_list in self._mirrored_fields.values():
                try:
                    field_list.remove(full_dest_field)
                except ValueError:
                    pass
            self._mirrored_fields.setdefault(full_src_field, []).append(
                full_dest_field)
            # update it to the right value
            if mux_val == "ZERO":
                value = dict(SCALE=1, OFFSET=0, UNITS="")[suff]
            else:
                mon_block_name, mon_field_name = mux_val.split(".", 1)
                mon_parts = self._parts[mon_block_name]
                src_attr = mon_parts["%s.%s" % (mon_field_name, suff)].attr
                value = src_attr.value
            self.control.send("%s=%s\n" % (full_dest_field, value))

    def _update_val_attr(self, val_attr, mux_val):
        """Repoint the mux's .VAL attribute at its new source *mux_val*
        and set its current value"""
        # Remove the old val_attr from all lists
        for mux_list in self._listening_attrs.values():
            try:
                mux_list.remove(val_attr)
            except ValueError:
                pass
        # add it to the list of things that need to update
        if mux_val == "ZERO":
            val_attr.set_value(0)
        elif mux_val == "ONE":
            val_attr.set_value(1)
        else:
            mon_block_name, mon_field_name = mux_val.split(".", 1)
            mon_parts = self._parts[mon_block_name]
            out_attr = mon_parts[mon_field_name].attr
            self._listening_attrs.setdefault(out_attr, []).append(val_attr)
            # update it to the right value
            val_attr.set_value(out_attr.value)