def recreate_from_others(self, method_metas, without=None):
    """Rebuild our takes/required/defaults by merging those of method_metas.

    Args:
        method_metas: MethodMetas whose takes elements should be merged in
        without: Optional list of element names to leave out of the merge
    """
    skip = [] if without is None else without
    merged_defaults = OrderedDict()
    merged_elements = OrderedDict()
    merged_required = []
    # Gather elements, required names and defaults from every source
    for meta in method_metas:
        for name in meta.takes.elements:
            if name in skip:
                continue
            # Round-trip through a dict so we hold a copy, not the original
            merged_elements[name] = meta.takes.elements[name].to_dict()
            if name in meta.takes.required and name not in merged_required:
                merged_required.append(name)
            if name in meta.defaults:
                # pop first so a later default moves to the end of the dict
                merged_defaults.pop(name, None)
                merged_defaults[name] = meta.defaults[name]
    # TODO: what about returns?
    # A defaulted argument is no longer required
    merged_required = [n for n in merged_required if n not in merged_defaults]
    # Update ourself from these structures
    takes = MapMeta()
    takes.set_elements(ElementMap(merged_elements))
    takes.set_required(merged_required)
    self.set_takes(takes)
    self.set_defaults(merged_defaults)
def replace_endpoints(self, d):
    """Replace all child endpoints with those deserialized from dict d."""
    new_children = OrderedDict()
    for key, value in d.items():
        assert isinstance(key, str_), "Expected string, got %s" % (key, )
        if key == "typeid":
            # typeid is serialization metadata, not a child; just check it
            assert value == self.typeid, \
                "Dict has typeid %s but Class has %s" % (value, self.typeid)
            continue
        try:
            object.__getattribute__(self, key)
        except AttributeError:
            # No clash with an existing attribute, safe to add as a child
            new_children[key] = deserialize_object(value, self.child_type_check)
        else:
            raise AttributeError(
                "Setting child %r would shadow an attribute" % (key, ))
    self.endpoints = list(new_children)
    for key, value in new_children.items():
        self.set_endpoint_data(key, value, notify=False)
    if self.process:
        # Report the whole serialized self as a single change
        self.process.report_changes(
            [self.process_path, serialize_object(self)])
def __init__(self, process, mri, parts, description=""):
    """Create the Controller hosting a single Block.

    Args:
        process: The Process hosting this controller
        mri: Malcolm resource id for the Block
        parts: Parts to add to the Block
        description: Description published on the Block meta
    """
    super(Controller, self).__init__(mri=mri)
    self.process = process
    self.mri = mri
    self._request_queue = Queue()
    # {Part: Alarm} for current faults
    self._faults = {}
    # {Hook: name}
    self._hook_names = {}
    # {Hook: {Part: func_name}}
    self._hooked_func_names = {}
    self._find_hooks()
    # {part_name: (field_name, Model, setter)
    self.part_fields = OrderedDict()
    # {name: Part}
    self.parts = OrderedDict()
    self._lock = RLock(self.use_cothread)
    self._block = BlockModel()
    self._block.meta.set_description(description)
    self.set_label(mri)
    # Parts are added before the notifier is attached below
    for part in parts:
        self.add_part(part)
    self._notifier = Notifier(mri, self._lock, self._block)
    self._block.set_notifier_path(self._notifier, [mri])
    self._write_functions = {}
    self._add_block_fields()
def __init__(self,
             mri,  # type: AMri
             config_dir,  # type: AConfigDir
             hostname="localhost",  # type: AHostname
             port=8888,  # type: APort
             initial_design="",  # type: AInitialDesign
             description="",  # type: ADescription
             use_git=True,  # type: AUseGit
             doc_url_base=DOC_URL_BASE,  # type: ADocUrlBase
             poll_period=0.1  # type: APollPeriod
             ):
    # type: (...) -> None
    """Set up state and the client for talking to a PandABlocks server."""
    super(PandABlocksManagerController, self).__init__(
        mri, config_dir, initial_design, description, use_git)
    self._poll_period = poll_period
    self._doc_url_base = doc_url_base
    # {block_name: BlockData}
    self._blocks_data = {}
    # {block_name: {field_name: Part}}
    self._blocks_parts = OrderedDict()
    # src_attr -> [dest_attr]
    self._listening_attrs = {}
    # lut elements to be displayed or not
    # {fnum: {id: visible}}
    self._lut_elements = {}
    # changes left over from last time
    self.changes = OrderedDict()
    # The PandABlock client that does the comms
    self.client = PandABlocksClient(hostname, port, Queue)
    # Filled in on reset
    self._stop_queue = None
    self._poll_spawned = None
def recreate_from_others(self, method_metas, without=()):
    """Merge takes elements/required/defaults from method_metas into self.

    Args:
        method_metas: MethodMetas to merge takes elements from
        without: Element names that should be left out of the merge
    """
    new_defaults = OrderedDict()
    new_elements = OrderedDict()
    new_required = []
    # Populate the intermediate data structures
    for mm in method_metas:
        takes_elements = mm.takes.elements
        for key in takes_elements:
            if key in without:
                continue
            # Copy via serialization so we never share the element object
            new_elements[key] = takes_elements[key].to_dict()
            if key in mm.takes.required and key not in new_required:
                new_required.append(key)
            if key in mm.defaults:
                # Re-insert so the newest default lands at the end
                new_defaults.pop(key, None)
                new_defaults[key] = mm.defaults[key]
    # TODO: what about returns?
    # Anything with a default can't stay required
    new_required = [k for k in new_required if k not in new_defaults]
    # Update ourself from these structures
    takes = MapMeta()
    takes.set_elements(new_elements)
    takes.set_required(new_required)
    self.set_takes(takes)
    self.set_defaults(new_defaults)
def _prepare_map_meta(args, allow_defaults, defaults=None, elements=None,
                      required=None):
    """Build a MapMeta (and its defaults) from a flat argument list.

    Args:
        args: Flat sequence of (name, meta, default) triples
        allow_defaults: Whether real default values are permitted
        defaults: Optional dict to accumulate defaults into
        elements: Optional dict to accumulate element metas into
        required: Optional list to accumulate required names into

    Returns:
        tuple: (MapMeta, defaults dict)
    """
    # prepare some data structures that will be used for the takes MapMeta
    defaults = OrderedDict() if defaults is None else defaults
    elements = OrderedDict() if elements is None else elements
    required = [] if required is None else required
    # Walk the flat arg list three entries at a time
    for i in range(0, len(args), 3):
        name = args[i]
        check_camel_case(name)
        element_meta = args[i + 1]
        default = args[i + 2]
        elements[name] = element_meta
        if default is REQUIRED:
            required.append(name)
        elif default is not OPTIONAL:
            # A real default value was supplied
            assert allow_defaults, \
                "Defaults not allowed in this structure"
            defaults[name] = default
    # Setup the takes MapMeta and attach it to the function's MethodModel
    meta = MapMeta()
    meta.set_elements(elements)
    meta.set_required(required)
    return meta, defaults
def __init__(self, name, sync_factory):
    """Set up the process queues, caches and request dispatch table.

    Args:
        name (str): Name of this process, used for logging
        sync_factory: Synchronisation factory, stored on the instance
            (presumably backs create_queue()/spawning - confirm)
    """
    self.set_logger_name(name)
    self.name = name
    self.sync_factory = sync_factory
    self.q = self.create_queue()
    self._blocks = OrderedDict()  # block_name -> Block
    self._controllers = OrderedDict()  # block_name -> Controller
    self._block_state_cache = Cache()
    self._recv_spawned = None
    self._other_spawned = []
    # lookup of all Subscribe requests, ordered to guarantee subscription
    # notification ordering
    # {Request.generate_key(): Subscribe}
    self._subscriptions = OrderedDict()
    self.comms = []
    self._client_comms = OrderedDict()  # client comms -> list of blocks
    # Dispatch table: request type -> handler method
    self._handle_functions = {
        Post: self._forward_block_request,
        Put: self._forward_block_request,
        Get: self._handle_get,
        Subscribe: self._handle_subscribe,
        Unsubscribe: self._handle_unsubscribe,
        BlockChanges: self._handle_block_changes,
        BlockRespond: self._handle_block_respond,
        BlockAdd: self._handle_block_add,
        BlockList: self._handle_block_list,
        AddSpawned: self._add_spawned,
    }
    self.create_process_block()
def __init__(self, process, parts, params): super(PandABlocksManagerController, self).__init__(process, parts, params) # {block_name: BlockData} self._blocks_data = {} # {block_name: {field_name: Part}} self._blocks_parts = OrderedDict() # src_attr -> [dest_attr] self._listening_attrs = {} # (block_name, src_field_name) -> [dest_field_name] self._scale_offset_fields = {} # full_src_field -> [full_dest_field] self._mirrored_fields = {} # fields that need to inherit UNITS, SCALE and OFFSET from upstream self._inherit_scale = {} self._inherit_offset = {} # lut elements to be displayed or not # {fnum: {id: visible}} self._lut_elements = {} # changes left over from last time self.changes = OrderedDict() # The PandABlock client that does the comms self.client = PandABlocksClient(params.hostname, params.port, Queue) # Filled in on reset self._stop_queue = None self._poll_spawned = None
def __init__(self, process, control, meta, block_name, field_name,
             writeable):
    """Build the table column metas from the server's field bit layout.

    Args:
        process: Hosting process
        control: Client used to query the table fields
        meta: Table meta to fill in with column elements
        block_name: Name of the block the table field lives on
        field_name: Name of the table field on that block
        writeable: Whether the table is writeable
    """
    super(PandABoxTablePart, self).__init__(
        process, control, meta, block_name, field_name, writeable)
    # Fill in the meta object with the correct headers
    columns = OrderedDict()
    # {column_name: (bits_hi, bits_lo)}
    self.fields = OrderedDict()
    fields = control.get_table_fields(block_name, field_name)
    # Use a distinct loop name so we don't shadow the field_name parameter
    for column_field, (bits_hi, bits_lo) in fields.items():
        nbits = bits_hi - bits_lo + 1
        if nbits < 1:
            raise ValueError("Bad bits %s:%s" % (bits_hi, bits_lo))
        if nbits == 1:
            # Single-bit columns are shown as checkboxes
            column_meta = BooleanArrayMeta(column_field)
            widget_tag = widget("checkbox")
        else:
            # Multi-bit columns use the smallest uint dtype that fits
            if nbits <= 8:
                dtype = "uint8"
            elif nbits <= 16:
                dtype = "uint16"
            elif nbits <= 32:
                dtype = "uint32"
            elif nbits <= 64:
                dtype = "uint64"
            else:
                raise ValueError("Bad bits %s:%s" % (bits_hi, bits_lo))
            column_meta = NumberArrayMeta(dtype, column_field)
            widget_tag = widget("textinput")
        label, column_name = make_label_attr_name(column_field)
        column_meta.set_label(label)
        column_meta.set_tags([widget_tag])
        columns[column_name] = column_meta
        self.fields[column_name] = (bits_hi, bits_lo)
    meta.set_elements(TableElementMap(columns))
def stop(self, timeout=DEFAULT_TIMEOUT):
    """Stop the process and wait for it to finish

    Args:
        timeout (float): Maximum amount of time to wait for each spawned
            object. None means forever
    """
    assert self.state == STARTED, "Process not started"
    self.state = STOPPING
    # Allow every controller a chance to clean up
    self._run_hook(ProcessStopHook, timeout=timeout)
    for spawned in self._spawned:
        if spawned.ready():
            continue
        self.log.debug(
            "Waiting for %s *%s **%s",
            spawned._function, spawned._args, spawned._kwargs)
        try:
            spawned.wait(timeout=timeout)
        except TimeoutError:
            # Log which spawned object stalled, then propagate
            self.log.warning(
                "Timeout waiting for %s *%s **%s",
                spawned._function, spawned._args, spawned._kwargs)
            raise
    # Everything finished: drop all bookkeeping and mark ourselves stopped
    self._spawned = []
    self._controllers = OrderedDict()
    self._unpublished = set()
    self.state = STOPPED
    self.log.debug("Done process.stop()")
def replace_endpoints(self, d):
    """Replace all child endpoints with those deserialized from dict d.

    Args:
        d: Serialized dict of {name: serialized child}; may contain a
            "typeid" entry which must match ours

    Raises:
        AttributeError: If a child name would shadow an existing attribute
    """
    children = OrderedDict()
    for k, v in d.items():
        assert isinstance(k, str_), "Expected string, got %s" % (k,)
        if k == "typeid":
            # typeid is serialization metadata, not a child
            assert v == self.typeid, \
                "Dict has typeid %s but Class has %s" % (v, self.typeid)
        else:
            try:
                object.__getattribute__(self, k)
            except AttributeError:
                # Name is free, so it can become a child
                children[k] = deserialize_object(v, self.child_type_check)
            else:
                raise AttributeError(
                    "Setting child %r would shadow an attribute" % (k,))
    self.endpoints = list(children)
    for k, v in children.items():
        self.set_endpoint_data(k, v, notify=False)
    if self.process:
        # Report the whole serialized self as one change
        self.process.report_changes(
            [self.process_path, serialize_object(self)])
class StatefulStates(object):
    """The most basic Malcolm state machine"""

    RESETTING = "Resetting"
    DISABLED = "Disabled"
    DISABLING = "Disabling"
    FAULT = "Fault"
    READY = "Ready"

    def __init__(self):
        # {initial_state: set of states it may transition to}
        self._allowed = OrderedDict()
        # These are all the states we can possibly be in
        self.possible_states = []
        self.create_block_transitions()
        self.create_error_disable_transitions()

    def create_block_transitions(self):
        """Install the transitions specific to this state machine."""
        self.set_allowed(self.RESETTING, self.READY)

    def create_error_disable_transitions(self):
        """Wire every known state into the Fault/Disable lifecycle."""
        # Snapshot before we add Fault/Disabling to possible_states
        for state in list(self.possible_states):
            self.set_allowed(state, self.FAULT)
            self.set_allowed(state, self.DISABLING)
        self.set_allowed(self.FAULT, [self.RESETTING, self.DISABLING])
        self.set_allowed(self.DISABLING, [self.FAULT, self.DISABLED])
        self.set_allowed(self.DISABLED, self.RESETTING)

    def transition_allowed(self, initial_state, target_state):
        """
        Check if a transition between two states is allowed

        Args:
            initial_state(str): Initial state
            target_state(str): Target state

        Returns:
            bool: True if allowed, False if not
        """
        assert initial_state in self._allowed, \
            "%s is not in %s" % (initial_state, list(self._allowed))
        return target_state in self._allowed[initial_state]

    def set_allowed(self, initial_state, allowed_states):
        """Add an allowed transition state

        Args:
            initial_state (str): Initial state
            allowed_states (str or list): state or list of states that
                initial_state can transition to
        """
        if not isinstance(allowed_states, list):
            allowed_states = [allowed_states]
        self._allowed.setdefault(initial_state, set()).update(allowed_states)
        # Record any states we haven't seen before
        for state in allowed_states + [initial_state]:
            if state not in self.possible_states:
                self.possible_states.append(state)
def __init__(self, name: str = "Process") -> None:
    """Set up empty controller/spawn bookkeeping.

    Args:
        name: Process name used for logging
    """
    self.set_logger(process_name=name)
    self.name = name
    self._controllers = OrderedDict()  # mri -> Controller
    self._unpublished: Set[str] = set()  # [mri] for unpublishable controllers
    self.state = STOPPED
    self._spawned: List[Spawned] = []
    self._spawn_count = 0
def set_parameters(self, parameters):
    """Parameters to Post to endpoint

    Args:
        parameters: Value to post to path
    """
    if parameters is not None:
        # Normalise: string keys, serialized values
        normalised = OrderedDict()
        for key, value in parameters.items():
            normalised[deserialize_object(key, str_)] = \
                serialize_object(value)
        parameters = normalised
    self.parameters = parameters
def test_error_server_and_simple_client_bad_path_dict_attribute(self):
    """A Get for a missing leaf of a dict endpoint returns an Error."""
    msg = OrderedDict()
    msg["typeid"] = "malcolm:core/Get:1.0"
    msg["id"] = 0
    # "bad" does not exist under the elements dict
    msg["path"] = ("hello", "greet", "meta", "takes", "elements", "bad")
    IOLoopHelper.call(self.send_message, msg)
    resp = self.result.get(timeout=2)
    # The error message embeds the repr of the dict type
    assert resp == dict(
        typeid="malcolm:core/Error:1.0",
        id=0,
        message=(
            "UnexpectedError: Object 'hello.greet.meta.takes.elements' "
            "of type %r has no attribute 'bad'") % type(OrderedDict()),
    )
def to_dict(self):
    """Serialize, prepending epics normative-type compatible labels."""
    d = OrderedDict()
    d["typeid"] = self.typeid
    # Add labels for compatibility with epics normative types; fall back
    # to the column name when a column has no label
    d["labels"] = [
        self.meta.elements[column_name].label or column_name
        for column_name in self.meta.elements]
    d.update(super(NTTable, self).to_dict())
    return d
def _save_to_structure(self):
    """Build the dict to save: layout rows plus each part's Save output."""
    structure = OrderedDict()
    structure["layout"] = OrderedDict()
    # Rows are sorted by name (name is the first element of each zip tuple)
    for name, x, y, visible in sorted(
            zip(self.layout.value.name, self.layout.value.x,
                self.layout.value.y, self.layout.value.visible)):
        layout_structure = OrderedDict()
        layout_structure["x"] = x
        layout_structure["y"] = y
        layout_structure["visible"] = visible
        structure["layout"][name] = layout_structure
    # Run the Save hook on all parts, sorted by part name for stable output
    for part_name, part_structure in sorted(self.run_hook(
            self.Save, self.create_part_tasks()).items()):
        structure[part_name] = part_structure
    return structure
def save(self, task):
    """Return {name: serialized value} for config-tagged child attributes."""
    part_structure = OrderedDict()
    for name in self.child:
        attr = self.child[name]
        # Only Attributes tagged "config" belong in a saved design
        if isinstance(attr, Attribute) and "config" in attr.meta.tags:
            part_structure[name] = serialize_object(attr.value)
    return part_structure
def setUp(self):
    """Fixture: a Post to BL18I:XSPRESS3.configure with a mock callback."""
    self.callback = MagicMock()
    self.path = ["BL18I:XSPRESS3", "configure"]
    self.parameters = OrderedDict()
    self.parameters["filePath"] = "/path/to/file.h5"
    self.parameters["exposure"] = 0.1
    self.o = Post(2, self.path, self.parameters, self.callback)
def test_update_squashing(self):
    """Changes made inside changes_squashed arrive as one Delta/Update."""
    # set some data
    self.block["attr"] = Dummy()
    self.block.attr["value"] = 32
    self.block["attr2"] = Dummy()
    self.block.attr2["value"] = "st"
    # subscribe once and check initial response
    r1 = Subscribe(path=["b"], delta=True)
    r1.set_callback(Mock())
    r2 = Subscribe(path=["b"])
    r2.set_callback(Mock())
    self.handle_subscribe(r1)
    self.handle_subscribe(r2)
    expected = OrderedDict()
    expected["attr"] = dict(value=32)
    expected["attr2"] = dict(value="st")
    # Delta subscriber gets the whole block as one change, Update subscriber
    # gets the value directly
    self.assert_called_with(r1.callback, Delta(changes=[[[], expected]]))
    self.assert_called_with(r2.callback, Update(value=expected))
    r1.callback.reset_mock()
    r2.callback.reset_mock()
    # squash two changes together
    with self.o.changes_squashed:
        self.block.attr["value"] = 33
        self.o.add_squashed_change(["b", "attr", "value"], 33)
        assert self.block.attr.value == 33
        self.block.attr2["value"] = "tr"
        self.o.add_squashed_change(["b", "attr2", "value"], "tr")
        assert self.block.attr2.value == "tr"
    # Both changes should be delivered in a single Delta on exit
    self.assert_called_with(
        r1.callback, Delta(
            changes=[[["attr", "value"], 33], [["attr2", "value"], "tr"]]))
    expected["attr"]["value"] = 33
    expected["attr2"]["value"] = "tr"
    self.assert_called_with(r2.callback, Update(value=expected))
def create_info(cls, configure_func: Callable) -> ConfigureParamsInfo:
    """Create a `ConfigureParamsInfo` describing the extra parameters
    that should be passed at configure"""
    call_types: Dict[str, Anno] = getattr(configure_func, "call_types", {})
    metas = OrderedDict()
    required = []
    defaults = OrderedDict()
    for k, anno in call_types.items():
        # Only parameters the base class doesn't already take are "extra"
        if k not in cls.call_types:
            scls = VMeta.lookup_annotype_converter(anno)
            metas[k] = scls.from_annotype(anno, writeable=True)
            if anno.default is NO_DEFAULT:
                required.append(k)
            elif anno.default is not None:
                # NOTE(review): an explicit default of None is silently
                # dropped from defaults here - presumably deliberate, but
                # confirm callers expect that
                defaults[k] = anno.default
    return ConfigureParamsInfo(metas, required, defaults)
def setUp(self):
    """Fixture: a plain dict and an ordered dict of part -> info lists."""
    self.d1 = dict(parta=[], partb=None)
    self.d2 = OrderedDict([
        ("parta", []),
        ("partb", [MyInfo("v1")]),
        ("partc", [MyInfo("v2"), MyInfo("v3")]),
        ("partd", None),
    ])
def get_changes(self):
    """Poll the server for changed fields.

    Returns:
        OrderedDict: {field: value}; value is the Exception class for
            fields in error, and the fetched table contents for table
            fields
    """
    changes = OrderedDict()
    # Table fields need a follow-up query; remember the queue per field
    table_queues = {}
    for line in self.send_recv("*CHANGES?\n"):
        if line.endswith("(error)"):
            # Field is in error: flag it with the Exception class
            field = line.split(" ", 1)[0]
            val = Exception
        elif "<" in line:
            # table
            field = line.rstrip("<")
            # Placeholder value; real contents are filled in below
            val = None
            table_queues[field] = self.send("%s?\n" % field)
        elif "=" in line:
            field, val = line.split("=", 1)
        else:
            self.log_warning("Can't parse line %r of changes", line)
            continue
        # TODO: Goes in server
        if val in ("POSITIONS.ZERO", "BITS.ZERO"):
            val = "ZERO"
        elif val == "BITS.ONE":
            val = "ONE"
        changes[field] = val
    # Collect the table replies we queued up above
    for field, q in table_queues.items():
        changes[field] = self.recv(q)
    return changes
def from_table(
    cls,
    table_cls: Type[Table],
    description: str,
    widget: Widget = None,
    writeable: List[str] = None,
    extra_tags: List[str] = None,
) -> "TableMeta":
    """Create a TableMeta object, using a Table subclass as the spec

    Args:
        table_cls: The Table class to read __init__ args from
        description: The description of the created Meta
        widget: The widget of the created Meta
        writeable: A list of the writeable field names. If there are any
            writeable fields then the whole Meta is writeable
        extra_tags: A list of tags to be added to the table meta
    """
    # None sentinels instead of mutable default arguments ([]), which
    # would be shared between calls
    if writeable is None:
        writeable = []
    if extra_tags is None:
        extra_tags = []
    elements = OrderedDict()
    for k, ct in table_cls.call_types.items():
        subclass = cls.lookup_annotype_converter(ct)
        elements[k] = subclass.from_annotype(ct, writeable=k in writeable)
    # The Meta as a whole is writeable iff any field is
    ret = cls(description=description, elements=elements,
              writeable=bool(writeable))
    if widget is None:
        widget = ret.default_widget()
    tags = [widget.tag()]
    tags.extend(extra_tags)
    ret.set_tags(tags)
    ret.set_table_cls(table_cls)
    return ret
def __init__(self, process):
    """Store the process, create our queue and register the send loop.

    Args:
        process: Hosting process, used to create our queue
    """
    self.process = process
    self.q = self.process.create_queue()
    # presumably the next request id to hand out - confirm against send_loop
    self._current_id = 1
    # Outstanding requests, in insertion order
    self.requests = OrderedDict()
    self.add_spawn_function(self.send_loop,
                            self.make_default_stop_func(self.q))
def set_elements(self, elements):
    """Set the elements dict from a serialized dict"""
    deserialized = OrderedDict()
    for k, v in elements.items():
        # typeid is serialization metadata, not an element
        if k != "typeid":
            k = deserialize_object(k, str_)
            v = deserialize_object(v, VMeta)
            if not v.label:
                # Derive a human readable label from the camelCase key
                v.set_label(camel_to_title(k))
            deserialized[k] = v
    if hasattr(self, "elements"):
        # Stop old elements notifying
        for k, v in self.elements.items():
            v.set_notifier_path(None, ())
    # Point the new elements at our notifier so their changes propagate
    for k, v in deserialized.items():
        v.set_notifier_path(self.notifier, self.path + ["elements", k])
    return self.set_endpoint_data("elements", deserialized)
def get_table_fields(self, block, field):
    """Query the server for the column layout of a table field.

    Args:
        block: Block name to query
        field: Table field name within that block

    Returns:
        OrderedDict: {column_name: (bits_hi, bits_lo)}
    """
    fields = OrderedDict()
    for line in self.send_recv("%s.%s.FIELDS?\n" % (block, field)):
        # Each line looks like "<hi>:<lo> <name>"
        bits_str, name = line.split(" ", 1)
        hi_lo = tuple(int(bits) for bits in bits_str.split(":"))
        fields[name.strip()] = hi_lo
    return fields
def test_init(self):
    """A freshly built state machine has the standard transition table."""
    expected = OrderedDict([
        ('Resetting', {'Ready', 'Fault', 'Disabling'}),
        ('Ready', {"Fault", "Disabling"}),
        ('Fault', {"Resetting", "Disabling"}),
        ('Disabling', {"Disabled", "Fault"}),
        ('Disabled', {"Resetting"}),
    ])
    assert self.o._allowed == expected
def set_elements(self, elements):
    """Set the elements dict from a serialized dict"""
    deserialized = OrderedDict()
    for k, v in elements.items():
        # typeid is serialization metadata, not an element
        if k != "typeid":
            k = deserialize_object(k, str_)
            deserialized[k] = deserialize_object(v, VArrayMeta)
    return self.set_endpoint_data("elements", deserialized)
def create_attribute_models(self):
    """Yield (name, attribute_model, setter) for the manager attributes."""
    for data in super(ManagerController, self).create_attribute_models():
        yield data
    assert os.path.isdir(self.params.configDir), \
        "%s is not a directory" % self.params.configDir
    if not os.path.isdir(os.path.join(self.params.configDir, ".git")):
        # Try and make it a git repo, don't care if it fails
        self._run_git_cmd("init")
        self._run_git_cmd("commit", "--allow-empty", "-m", "Created repo")
    # Create writeable attribute table for the layout info we need
    elements = OrderedDict()
    elements["name"] = StringArrayMeta("Name of layout part")
    elements["mri"] = StringArrayMeta("Malcolm full name of child block")
    elements["x"] = NumberArrayMeta(
        "float64", "X Coordinate of child block")
    elements["y"] = NumberArrayMeta(
        "float64", "Y Coordinate of child block")
    elements["visible"] = BooleanArrayMeta("Whether child block is visible")
    layout_table_meta = TableMeta(
        "Layout of child blocks", elements=elements,
        tags=[widget("flowgraph")])
    layout_table_meta.set_writeable_in(ss.READY)
    self.layout = layout_table_meta.create_attribute_model()
    yield "layout", self.layout, self.set_layout
    # Create writeable attribute for loading an existing layout
    design_meta = ChoiceMeta(
        "Design name to load", tags=[config(), widget("combo")])
    design_meta.set_writeable_in(ss.READY)
    self.design = design_meta.create_attribute_model()
    yield "design", self.design, self.set_design
    # Create writeable attribute table for the exported fields
    elements = OrderedDict()
    elements["name"] = ChoiceArrayMeta("Name of exported block.field")
    elements["exportName"] = StringArrayMeta(
        "Name of the field within current block")
    exports_table_meta = TableMeta(
        "Exported fields of child blocks", tags=[widget("table")],
        elements=elements)
    exports_table_meta.set_writeable_in(ss.READY)
    self.exports = exports_table_meta.create_attribute_model()
    yield "exports", self.exports, self.set_exports
    # Create read-only indicator for when things are modified
    modified_meta = BooleanMeta(
        "Whether the design is modified", tags=[widget("led")])
    self.modified = modified_meta.create_attribute_model()
    yield "modified", self.modified, None
def test_init(self):
    """The constructed state machine exposes the expected transitions."""
    expected = OrderedDict()
    expected.update(
        Resetting={"Ready", "Fault", "Disabling"},
        Ready={"Fault", "Disabling"},
        Fault={"Resetting", "Disabling"},
        Disabling={"Disabled", "Fault"},
        Disabled={"Resetting"},
    )
    assert self.o._allowed == expected
def wait_hooks(
    logger: Optional[logging.Logger],
    hook_queue: Queue,
    hook_spawned: List[Hook],
    timeout: Optional[float] = None,
    exception_check: bool = True,
) -> Dict[str, List[Info]]:
    """Wait for every spawned hook to report back, collecting the results.

    Args:
        logger: Optional logger for progress messages
        hook_queue: Queue each hook posts its (hook, return value) to
        hook_spawned: The hooks that were spawned
        timeout: Time to wait for spawned processes to complete on abort,
            not time for them to run for
        exception_check: If True, re-raise the first non-Aborted exception
            returned by a hook (after stopping and waiting the others)

    Returns:
        dict: {child name: hook return value (None until it reports)}
    """
    # timeout is time to wait for spawned processes to complete on abort,
    # not time for them to run for
    # Wait for them all to finish
    return_dict = OrderedDict()
    for spawned_hook in hook_spawned:
        return_dict[spawned_hook.child.name] = None
    start = time.time()
    hook_spawned_set = set(hook_spawned)
    while hook_spawned_set:
        hook: Hook
        ret: Any
        hook, ret = hook_queue.get()
        hook_spawned_set.remove(hook)
        # Wait for the process to terminate
        assert hook.spawned, "No spawned process"
        hook.spawned.wait(timeout)
        duration = time.time() - start
        if logger:
            if hook_spawned_set:
                logger.debug(
                    "%s: Child %s returned %r after %ss. Still waiting for %s",
                    hook.name,
                    hook.child.name,
                    ret,
                    duration,
                    [h.child.name for h in hook_spawned_set],
                )
            else:
                logger.debug(
                    "%s: Child %s returned %r after %ss. Returning...",
                    hook.name,
                    hook.child.name,
                    ret,
                    duration,
                )
        if isinstance(ret, Exception) and exception_check:
            if not isinstance(ret, AbortedError):
                # If AbortedError, all tasks have already been stopped.
                # Got an error, so stop and wait all hook runners
                for h in hook_spawned:
                    h.stop()
            # Wait for them to finish
            for h in hook_spawned:
                assert h.spawned, "No spawned functions"
                h.spawned.wait(timeout)
            raise ret
        else:
            return_dict[hook.child.name] = ret
    return return_dict
def save(self, context):
    """Serialize the config-tagged attributes of our child and remember them.

    Args:
        context: Context used to get a view of our child block

    Returns:
        OrderedDict: {attribute name: serialized value}
    """
    child = context.block_view(self.params.mri)
    part_structure = OrderedDict()
    for name in child:
        attr = getattr(child, name)
        # Only "config"-tagged Attributes are part of a saved design
        if isinstance(attr, Attribute) and "config" in attr.meta.tags:
            part_structure[name] = serialize_object(attr.value)
    # Keep our own record of what was saved
    self.saved_structure = part_structure
    return part_structure
def execute(self, args):
    """Run our method over RPC, translating args/response via PvObjects.

    Args:
        args: pvaccess structure of the call arguments

    Returns:
        pvaccess.PvObject: Return value (or serialized Error) of the Post;
            None if an unexpected exception is logged and swallowed
    """
    self.log_debug("Execute %s method called on [%s] with: %s",
                   self._method, self._block, args)
    self.log_debug("Structure: %s", args.getStructureDict())
    # Acquire the lock
    with self._lock:
        try:
            # We now need to create the Post message and execute it
            endpoint = [self._block, self._method]
            request = Post(None, self._server.q, endpoint,
                           self.parse_variants(args.toDict(True)))
            request.set_id(self._id)
            self._server.process.q.put(request)
            # Now wait for the Post reply
            self.log_debug("Waiting for reply")
            self.wait_for_reply(timeout=None)
            self.log_debug("Reply received %s %s",
                           type(self._response), self._response)
            response_dict = OrderedDict()
            if isinstance(self._response, Return):
                response_dict = self._response["value"]
                self.log_debug("Response value : %s", response_dict)
            elif isinstance(self._response, Error):
                # Errors are serialized whole, minus the request id
                response_dict = self._response.to_dict()
                response_dict.pop("id")
            if not response_dict:
                # Empty result: hand back an empty Map structure
                pv_object = pvaccess.PvObject(OrderedDict(),
                                              'malcolm:core/Map:1.0')
            else:
                #pv_object = self._server.dict_to_structure(response_dict)
                #self.log_debug("Pv Object structure created")
                #self.log_debug("%s",
                #               self._server.strip_type_id(response_dict))
                #pv_object.set(self._server.strip_type_id(response_dict))
                pv_object = self._server.dict_to_pv_object(response_dict)
            self.log_debug("Pv Object value set: %s", pv_object)
            # Add this RPC to the purge list
            #self._server.register_dead_rpc(self._id)
            return pv_object
        except Exception:
            # NOTE(review): this logs self._request but the local above is
            # named `request` - confirm self._request is set elsewhere
            self.log_exception("Request %s failed", self._request)
def __init__(self):
    """Build the transition table: reset path, subclass states, then the
    standard Fault/Disable transitions."""
    self.set_logger_name(type(self).__name__)
    self.allowed_transitions = OrderedDict()
    self.busy_states = []
    assert self.AFTER_RESETTING is not None, \
        "No AFTER_RESETTING state given"
    self.set_allowed(self.RESETTING, self.AFTER_RESETTING)
    self.set_busy(self.RESETTING)
    # Subclass hook to add its own states/transitions
    self.create_states()
    custom_states = list(self.allowed_transitions) + [self.AFTER_RESETTING]
    # Set transitions for standard states
    for state in custom_states:
        self.set_allowed(state, self.FAULT)
        self.set_allowed(state, self.DISABLING)
    self.set_allowed(self.FAULT, [self.RESETTING, self.DISABLING])
    self.set_allowed(self.DISABLING, [self.FAULT, self.DISABLED])
    self.set_allowed(self.DISABLED, self.RESETTING)
    # These are all the states we can possibly be in
    self.possible_states = list(self.allowed_transitions)
def __init__(self, process, control):
    """Set up bookkeeping dicts and register the poll loop.

    Args:
        process: Hosting process, used to create our queue
        control: Client used for comms with the PandA box
    """
    self.set_logger_name("PandABoxPoller(%s)" % control.hostname)
    self.process = process
    self.control = control
    # block_name -> BlockData
    self._block_data = {}
    # block_name -> {field_name: Part}
    self._parts = OrderedDict()
    # src_attr -> [dest_attr]
    self._listening_attrs = {}
    # (block_name, src_field_name) -> [dest_field_name]
    self._scale_offset_fields = {}
    # full_src_field -> [full_dest_field]
    self._mirrored_fields = {}
    # changes left over from last time
    self.changes = OrderedDict()
    # fields that need to inherit UNITS, SCALE and OFFSET from upstream
    self._inherit_scale = {}
    self._inherit_offset = {}
    self.q = process.create_queue()
    self.add_spawn_function(self.poll_loop,
                            self.make_default_stop_func(self.q))
class StateMachine(Loggable):
    """Abstract base: a transition table plus busy-state bookkeeping."""

    RESETTING = "Resetting"
    DISABLED = "Disabled"
    DISABLING = "Disabling"
    FAULT = "Fault"

    # Subclasses must override this
    AFTER_RESETTING = None

    def __init__(self):
        self.set_logger_name(type(self).__name__)
        self.allowed_transitions = OrderedDict()
        self.busy_states = []
        assert self.AFTER_RESETTING is not None, \
            "No AFTER_RESETTING state given"
        self.set_allowed(self.RESETTING, self.AFTER_RESETTING)
        self.set_busy(self.RESETTING)
        # Subclass hook to add its own states/transitions
        self.create_states()
        custom_states = list(self.allowed_transitions) + \
            [self.AFTER_RESETTING]
        # Set transitions for standard states
        for state in custom_states:
            self.set_allowed(state, self.FAULT)
            self.set_allowed(state, self.DISABLING)
        self.set_allowed(self.FAULT, [self.RESETTING, self.DISABLING])
        self.set_allowed(self.DISABLING, [self.FAULT, self.DISABLED])
        self.set_allowed(self.DISABLED, self.RESETTING)
        # These are all the states we can possibly be in
        self.possible_states = list(self.allowed_transitions)

    def create_states(self):
        """Subclass hook: install subclass-specific transitions."""
        raise NotImplementedError()

    def is_allowed(self, initial_state, target_state):
        """
        Check if a transition between two states is allowed

        Args:
            initial_state(str): Initial state
            target_state(str): Target state

        Returns:
            bool: True if allowed, False if not
        """
        assert initial_state in self.allowed_transitions, \
            "%s is not in %s" % (
                initial_state, list(self.allowed_transitions))
        return target_state in self.allowed_transitions[initial_state]

    def set_allowed(self, initial_state, allowed_states):
        """
        Add an allowed transition state

        Args:
            initial_state(str): Initial state
            allowed_states(list(str) / str): States that initial_state can
                transition to
        """
        if not isinstance(allowed_states, list):
            allowed_states = [allowed_states]
        self.allowed_transitions.setdefault(initial_state, set()).update(
            allowed_states)

    def set_busy(self, state, busy=True):
        """
        Set the busy-ness of a state; i.e. whether the block is considered
        to be busy in a certain state

        Args:
            state(str): State to update
            busy(bool): True or False for whether state is a busy state
        """
        if not busy and state in self.busy_states:
            self.busy_states.remove(state)
        elif busy and state not in self.busy_states:
            self.busy_states.append(state)

    def is_busy(self, state):
        """
        Check if a state is a busy state

        Args:
            state(str): State to check busy-ness for

        Returns:
            bool: True if state is a busy state, False if not
        """
        return state in self.busy_states
def set_parameters(self, parameters):
    """Set the parameters endpoint, normalising keys and values."""
    if parameters is not None:
        # String keys, serialized values
        cleaned = OrderedDict()
        for key, value in parameters.items():
            cleaned[deserialize_object(key, str_)] = serialize_object(value)
        parameters = cleaned
    self.set_endpoint_data("parameters", parameters)
class PandABoxPoller(Spawnable, Loggable):
    """Polls a PandABox for field changes and mirrors them into blocks.

    Keeps several cross-reference maps so that changes to one field
    (e.g. a SCALE on a pos_out) are propagated to the fields that mirror it.
    """

    def __init__(self, process, control):
        self.set_logger_name("PandABoxPoller(%s)" % control.hostname)
        self.process = process
        self.control = control
        # block_name -> BlockData
        self._block_data = {}
        # block_name -> {field_name: Part}
        self._parts = OrderedDict()
        # src_attr -> [dest_attr]
        self._listening_attrs = {}
        # (block_name, src_field_name) -> [dest_field_name]
        self._scale_offset_fields = {}
        # full_src_field -> [full_dest_field]
        self._mirrored_fields = {}
        # changes left over from last time
        self.changes = OrderedDict()
        # fields that need to inherit UNITS, SCALE and OFFSET from upstream
        self._inherit_scale = {}
        self._inherit_offset = {}
        self.q = process.create_queue()
        self.add_spawn_function(self.poll_loop,
                                self.make_default_stop_func(self.q))

    def make_panda_block(self, mri, block_name, block_data, parts=None,
                         area_detector=False):
        """Create and register a block for one PandA hardware block.

        Args:
            mri (str): Malcolm resource id for the new block
            block_name (str): PandA block name, e.g. "LUT1"
            block_data (BlockData): Field definitions for the block
            parts (list): Extra Parts to add to the generated ones
            area_detector (bool): Passed through to the block maker

        Returns:
            Block: The created block
        """
        # Validate and store block_data
        self._store_block_data(block_name, block_data)

        # Defer creation of parts to a block maker
        maker = PandABoxBlockMaker(self.process, self.control, block_name,
                                   block_data, area_detector)

        # Add in any extras we are passed
        if parts:
            for part in parts:
                maker.parts[part.name] = part

        # Make a controller
        params = DefaultController.MethodMeta.prepare_input_map(mri=mri)
        controller = DefaultController(
            self.process, maker.parts.values(), params)
        block = controller.block

        self._parts[block_name] = maker.parts

        # Set the initial block_url
        self._set_icon_url(block_name)

        return block

    def _set_icon_url(self, block_name):
        # Point the block's icon attribute at an image named after the
        # block type (block name with trailing digits stripped)
        icon_attr = self._parts[block_name]["icon"].attr
        fname = block_name.rstrip("0123456789")
        if fname == "LUT":
            # TODO: Get fname from func
            pass
        # TODO: make relative
        url = "http://localhost:8080/path/to/%s" % fname
        icon_attr.set_value(url)

    def _store_block_data(self, block_name, block_data):
        self._block_data[block_name] = block_data

        # setup param pos on a block with pos_out to inherit SCALE OFFSET UNITS
        pos_fields = []
        pos_out_fields = []
        pos_mux_inp_fields = []
        for field_name, field_data in block_data.fields.items():
            if field_name == "INP" and field_data.field_type == "pos_mux":
                pos_mux_inp_fields.append(field_name)
            elif field_data.field_type == "pos_out":
                pos_out_fields.append(field_name)
            elif field_data.field_subtype in ("pos", "relative_pos"):
                pos_fields.append(field_name)

        # Make sure pos_fields can get SCALE from somewhere
        if pos_fields:
            sources = pos_mux_inp_fields + pos_out_fields
            assert len(sources) == 1, \
                "Expected one source of SCALE and OFFSET for %s, got %s" % (
                    pos_fields, sources)
            for field_name in pos_fields:
                self._map_scale_offset(block_name, sources[0], field_name)

    def _map_scale_offset(self, block_name, src_field, dest_field):
        # Remember that dest_field mirrors src_field's SCALE/OFFSET/UNITS
        self._scale_offset_fields.setdefault(
            (block_name, src_field), []).append(dest_field)
        if src_field == "INP":
            # mapping based on what it is connected to, defer
            return
        for suff in ("SCALE", "OFFSET", "UNITS"):
            full_src_field = "%s.%s.%s" % (block_name, src_field, suff)
            full_dest_field = "%s.%s.%s" % (block_name, dest_field, suff)
            self._mirrored_fields.setdefault(full_src_field, []).append(
                full_dest_field)

    def poll_loop(self):
        """At 10Hz poll for changes"""
        next_poll = time.time()
        while True:
            next_poll += 0.1
            timeout = next_poll - time.time()
            if timeout < 0:
                timeout = 0
            try:
                # Use the queue timeout to pace the loop; a STOP message
                # wakes us up early and terminates the loop
                message = self.q.get(timeout=timeout)
                if message is Spawnable.STOP:
                    break
            except queue.Empty:
                # No problem
                pass
            try:
                self.handle_changes(self.control.get_changes())
            except Exception:
                self.log_exception("Error while getting changes")

    def handle_changes(self, changes):
        """Merge changes into self.changes and push them to attributes."""
        for k, v in changes.items():
            self.changes[k] = v
        for full_field, val in list(self.changes.items()):
            # If we have a mirrored field then fire off a request
            for dest_field in self._mirrored_fields.get(full_field, []):
                self.control.send("%s=%s\n" % (dest_field, val))
            block_name, field_name = full_field.split(".", 1)
            ret = self.update_attribute(block_name, field_name, val)
            # A non-None return means "deal with this again next poll"
            # (used for the bit_out toggle below)
            if ret is not None:
                self.changes[full_field] = ret
            else:
                self.changes.pop(full_field)
            # If it was LUT.FUNC then recalculate icon
            if block_name.startswith("LUT") and field_name == "FUNC":
                self._set_icon_url(block_name)

    def update_attribute(self, block_name, field_name, val):
        """Update one attribute from a polled value.

        Returns:
            The value to re-process next poll, or None if fully handled
        """
        ret = None
        if block_name not in self._parts:
            self.log_debug("Block %s not known", block_name)
            return
        parts = self._parts[block_name]
        if field_name not in parts:
            self.log_debug("Block %s has no field %s", block_name, field_name)
            return
        part = parts[field_name]
        attr = part.attr
        field_data = self._block_data[block_name].fields.get(field_name, None)
        if val == Exception:
            # NOTE(review): the Exception class itself appears to be used as
            # an error marker by the control layer — confirm upstream
            # TODO: set error
            val = None
        elif isinstance(attr.meta, BooleanMeta):
            val = bool(int(val))
            is_bit_out = field_data and field_data.field_type == "bit_out"
            if is_bit_out and val == attr.value:
                # make bit_out things toggle while changing
                ret = val
                val = not val
        elif isinstance(attr.meta, TableMeta):
            val = part.table_from_list(val)

        # Update the value of our attribute and anyone listening
        attr.set_value(val)
        for dest_attr in self._listening_attrs.get(attr, []):
            dest_attr.set_value(val)

        # if we changed the value of a mux, update the slaved values
        if field_data and field_data.field_type in ("bit_mux", "pos_mux"):
            val_part = parts[field_name + ".VAL"]
            val_attr = val_part.attr
            self._update_val_attr(val_attr, val)
            if field_data.field_type == "pos_mux" and field_name == "INP":
                # all param pos fields should inherit scale and offset
                for dest_field_name in self._scale_offset_fields.get(
                        (block_name, field_name), []):
                    self._update_scale_offset_mapping(
                        block_name, dest_field_name, val)
        return ret

    def _update_scale_offset_mapping(self, block_name, field_name, mux_val):
        # Find the fields that depend on this input
        field_data = self._block_data[block_name].fields.get(field_name, None)
        if field_data.field_subtype == "relative_pos":
            # relative_pos fields don't inherit OFFSET
            suffs = ("SCALE", "UNITS")
        else:
            suffs = ("SCALE", "OFFSET", "UNITS")

        for suff in suffs:
            full_src_field = "%s.%s" % (mux_val, suff)
            full_dest_field = "%s.%s.%s" % (block_name, field_name, suff)

            # Remove mirrored fields that are already in lists
            for field_list in self._mirrored_fields.values():
                try:
                    field_list.remove(full_dest_field)
                except ValueError:
                    pass
            self._mirrored_fields.setdefault(full_src_field, []).append(
                full_dest_field)

            # update it to the right value
            if mux_val == "ZERO":
                value = dict(SCALE=1, OFFSET=0, UNITS="")[suff]
            else:
                mon_block_name, mon_field_name = mux_val.split(".", 1)
                mon_parts = self._parts[mon_block_name]
                src_attr = mon_parts["%s.%s" % (mon_field_name, suff)].attr
                value = src_attr.value
            self.control.send("%s=%s\n" % (full_dest_field, value))

    def _update_val_attr(self, val_attr, mux_val):
        # Remove the old val_attr from all lists
        for mux_list in self._listening_attrs.values():
            try:
                mux_list.remove(val_attr)
            except ValueError:
                pass
        # add it to the list of things that need to update
        if mux_val == "ZERO":
            val_attr.set_value(0)
        elif mux_val == "ONE":
            val_attr.set_value(1)
        else:
            mon_block_name, mon_field_name = mux_val.split(".", 1)
            mon_parts = self._parts[mon_block_name]
            out_attr = mon_parts[mon_field_name].attr
            self._listening_attrs.setdefault(out_attr, []).append(val_attr)
            # update it to the right value
            val_attr.set_value(out_attr.value)
class Process(Loggable):
    """Hosts a number of Blocks, distributing requests between them"""

    def __init__(self, name, sync_factory):
        self.set_logger_name(name)
        self.name = name
        self.sync_factory = sync_factory
        self.q = self.create_queue()
        self._blocks = OrderedDict()  # block_name -> Block
        self._controllers = OrderedDict()  # block_name -> Controller
        self._block_state_cache = Cache()
        self._recv_spawned = None
        self._other_spawned = []
        # lookup of all Subscribe requests, ordered to guarantee subscription
        # notification ordering
        # {Request.generate_key(): Subscribe}
        self._subscriptions = OrderedDict()
        self.comms = []
        self._client_comms = OrderedDict()  # client comms -> list of blocks
        # Dispatch table mapping request type -> handler method
        self._handle_functions = {
            Post: self._forward_block_request,
            Put: self._forward_block_request,
            Get: self._handle_get,
            Subscribe: self._handle_subscribe,
            Unsubscribe: self._handle_unsubscribe,
            BlockChanges: self._handle_block_changes,
            BlockRespond: self._handle_block_respond,
            BlockAdd: self._handle_block_add,
            BlockList: self._handle_block_list,
            AddSpawned: self._add_spawned,
        }
        self.create_process_block()

    def recv_loop(self):
        """Service self.q, distributing the requests to the right block"""
        while True:
            request = self.q.get()
            self.log_debug("Received request %s", request)
            if request is PROCESS_STOP:
                # Got the sentinel, stop immediately
                break
            try:
                self._handle_functions[type(request)](request)
            except Exception as e:  # pylint:disable=broad-except
                self.log_exception("Exception while handling %s", request)
                try:
                    # Best effort: tell the requester what went wrong
                    request.respond_with_error(str(e))
                except Exception:
                    pass

    def add_comms(self, comms):
        """Register a comms object; only legal before start()"""
        assert not self._recv_spawned, \
            "Can't add comms when process has been started"
        self.comms.append(comms)

    def start(self):
        """Start the process going"""
        self._recv_spawned = self.sync_factory.spawn(self.recv_loop)
        for comms in self.comms:
            comms.start()

    def stop(self, timeout=None):
        """Stop the process and wait for it to finish

        Args:
            timeout (float): Maximum amount of time to wait for each spawned
                process. None means forever
        """
        assert self._recv_spawned, "Process not started"
        self.q.put(PROCESS_STOP)
        for comms in self.comms:
            comms.stop()
        # Wait for recv_loop to complete first
        self._recv_spawned.wait(timeout=timeout)
        # Now wait for anything it spawned to complete
        for s, _ in self._other_spawned:
            s.wait(timeout=timeout)
        # Garbage collect the syncfactory
        del self.sync_factory

    def _forward_block_request(self, request):
        """Lookup target Block and spawn block.handle_request(request)

        Args:
            request (Request): The message that should be passed to the Block
        """
        block_name = request.endpoint[0]
        block = self._blocks[block_name]
        spawned = self.sync_factory.spawn(block.handle_request, request)
        self._add_spawned(AddSpawned(spawned, block.handle_request))

    def create_queue(self):
        """
        Create a queue using sync_factory object

        Returns:
            Queue: New queue
        """
        return self.sync_factory.create_queue()

    def create_lock(self):
        """
        Create a lock using sync_factory object

        Returns:
            New lock object
        """
        return self.sync_factory.create_lock()

    def spawn(self, function, *args, **kwargs):
        """Calls SyncFactory.spawn()"""
        def catching_function():
            # Wrap so that exceptions in spawned code get logged
            try:
                function(*args, **kwargs)
            except Exception:
                self.log_exception(
                    "Exception calling %s(*%s, **%s)", function, args, kwargs)
                raise
        spawned = self.sync_factory.spawn(catching_function)
        # Record the spawned object via the request queue so bookkeeping
        # happens in the Process thread
        request = AddSpawned(spawned, function)
        self.q.put(request)
        return spawned

    def _add_spawned(self, request):
        spawned = self._other_spawned
        self._other_spawned = []
        spawned.append((request.spawned, request.function))
        # Filter out the spawned that have completed to stop memory leaks
        for sp, f in spawned:
            if not sp.ready():
                self._other_spawned.append((sp, f))

    def get_client_comms(self, block_name):
        # Returns the client comms hosting block_name, or None if not found
        for client_comms, blocks in list(self._client_comms.items()):
            if block_name in blocks:
                return client_comms

    def create_process_block(self):
        """Create the introspection block describing this Process itself"""
        self.process_block = Block()
        # TODO: add a meta here
        children = OrderedDict()
        children["blocks"] = StringArrayMeta(
            description="Blocks hosted by this Process"
        ).make_attribute([])
        children["remoteBlocks"] = StringArrayMeta(
            description="Blocks reachable via ClientComms"
        ).make_attribute([])
        self.process_block.replace_endpoints(children)
        self.process_block.set_process_path(self, [self.name])
        self.add_block(self.process_block, self)

    def update_block_list(self, client_comms, blocks):
        self.q.put(BlockList(client_comms=client_comms, blocks=blocks))

    def _handle_block_list(self, request):
        self._client_comms[request.client_comms] = request.blocks
        # Rebuild the de-duplicated union of all remote block names
        remotes = []
        for blocks in self._client_comms.values():
            remotes += [b for b in blocks if b not in remotes]
        self.process_block["remoteBlocks"].set_value(remotes)

    def _handle_block_changes(self, request):
        """Update subscribers with changes and applies stored changes to the
        cached structure"""
        # update cached dict
        subscription_changes = self._block_state_cache.apply_changes(
            *request.changes)

        # Send out the changes
        for subscription, changes in subscription_changes.items():
            if subscription.delta:
                # respond with the filtered changes
                subscription.respond_with_delta(changes)
            else:
                # respond with the structure of everything
                # below the endpoint
                d = self._block_state_cache.walk_path(subscription.endpoint)
                subscription.respond_with_update(d)

    def report_changes(self, *changes):
        self.q.put(BlockChanges(changes=list(changes)))

    def block_respond(self, response, response_queue):
        self.q.put(BlockRespond(response, response_queue))

    def _handle_block_respond(self, request):
        """Push the response to the required queue"""
        request.response_queue.put(request.response)

    def add_block(self, block, controller):
        """Add a block to be hosted by this process

        Args:
            block (Block): The block to be added
            controller (Controller): Its controller
        """
        path = block.process_path
        assert len(path) == 1, \
            "Expected block %r to have %r as parent, got path %r" % \
            (block, self, path)
        name = path[0]
        assert name not in self._blocks, \
            "There is already a block called %r" % name
        request = BlockAdd(block=block, controller=controller, name=name)
        if self._recv_spawned:
            # Started, so call in Process thread
            self.q.put(request)
        else:
            # Not started yet so we are safe to add in this thread
            self._handle_block_add(request)

    def _handle_block_add(self, request):
        """Add a block to be hosted by this process"""
        assert request.name not in self._blocks, \
            "There is already a block called %r" % request.name
        self._blocks[request.name] = request.block
        self._controllers[request.name] = request.controller
        serialized = request.block.to_dict()
        change_request = BlockChanges([[[request.name], serialized]])
        self._handle_block_changes(change_request)
        # Regenerate list of blocks
        self.process_block["blocks"].set_value(list(self._blocks))

    def get_block(self, block_name):
        try:
            return self._blocks[block_name]
        except KeyError:
            # Not hosted locally; fall back to a client block if it is
            # reachable via ClientComms
            if block_name in self.process_block.remoteBlocks:
                return self.make_client_block(block_name)
            else:
                raise

    def make_client_block(self, block_name):
        params = ClientController.MethodMeta.prepare_input_map(
            mri=block_name)
        controller = ClientController(self, {}, params)
        return controller.block

    def get_controller(self, block_name):
        return self._controllers[block_name]

    def _handle_subscribe(self, request):
        """Add a new subscriber and respond with the current
        sub-structure state"""
        key = request.generate_key()
        assert key not in self._subscriptions, \
            "Subscription on %s already exists" % (key,)
        self._subscriptions[key] = request
        self._block_state_cache.add_subscriber(request, request.endpoint)
        d = self._block_state_cache.walk_path(request.endpoint)
        self.log_debug("Initial subscription value %s", d)
        if request.delta:
            request.respond_with_delta([[[], d]])
        else:
            request.respond_with_update(d)

    def _handle_unsubscribe(self, request):
        """Remove a subscriber and respond with success or error"""
        key = request.generate_key()
        try:
            subscription = self._subscriptions.pop(key)
        except KeyError:
            request.respond_with_error(
                "No subscription found for %s" % (key,))
        else:
            self._block_state_cache.remove_subscriber(
                subscription, subscription.endpoint)
            request.respond_with_return()

    def _handle_get(self, request):
        d = self._block_state_cache.walk_path(request.endpoint)
        request.respond_with_return(d)
class PandABoxTablePart(PandABoxFieldPart):
    """This will normally be instantiated by the PandABox assembly, not created
    in yaml"""

    def __init__(self, process, control, meta, block_name, field_name,
                 writeable):
        super(PandABoxTablePart, self).__init__(
            process, control, meta, block_name, field_name, writeable)
        # Fill in the meta object with the correct headers
        columns = OrderedDict()
        # {column_name: (bits_hi, bits_lo)}: inclusive 0-based bit range that
        # each column occupies within a packed table row
        self.fields = OrderedDict()
        fields = control.get_table_fields(block_name, field_name)
        # NOTE: loop variable renamed so it no longer shadows the
        # field_name parameter used above
        for table_field_name, (bits_hi, bits_lo) in fields.items():
            nbits = bits_hi - bits_lo + 1
            if nbits < 1:
                raise ValueError("Bad bits %s:%s" % (bits_hi, bits_lo))
            if nbits == 1:
                # Single bit columns render as checkboxes
                column_meta = BooleanArrayMeta(table_field_name)
                widget_tag = widget("checkbox")
            else:
                # Pick the smallest unsigned dtype that holds nbits
                if nbits <= 8:
                    dtype = "uint8"
                elif nbits <= 16:
                    dtype = "uint16"
                elif nbits <= 32:
                    dtype = "uint32"
                elif nbits <= 64:
                    dtype = "uint64"
                else:
                    raise ValueError("Bad bits %s:%s" % (bits_hi, bits_lo))
                column_meta = NumberArrayMeta(dtype, table_field_name)
                widget_tag = widget("textinput")
            label, column_name = make_label_attr_name(table_field_name)
            column_meta.set_label(label)
            column_meta.set_tags([widget_tag])
            columns[column_name] = column_meta
            self.fields[column_name] = (bits_hi, bits_lo)
        meta.set_elements(TableElementMap(columns))

    def set_field(self, value):
        """Pack the given Table and write it to the PandABox field."""
        int_values = self.list_from_table(value)
        self.control.set_table(self.block_name, self.field_name, int_values)

    def _calc_nconsume(self):
        """Return how many 32-bit words one packed table row occupies.

        Bit indices are 0-based, so a row spanning bits 0..max_bits_hi needs
        ceil((max_bits_hi + 1) / 32) == max_bits_hi // 32 + 1 words.

        Fix: the previous formula int((max_bits_hi + 31) / 32) returned 0
        words for a single-bit field at bit 0 and undercounted by one word
        whenever max_bits_hi was an exact multiple of 32 (e.g. a field
        occupying bit 32 needs 2 words, not 1).
        """
        max_bits_hi = max(bits_hi for bits_hi, _ in self.fields.values())
        return max_bits_hi // 32 + 1

    def list_from_table(self, table):
        """Pack a Table into a flat row-major list of 32-bit unsigned ints.

        Args:
            table (Table): Table whose columns match self.fields

        Returns:
            list: nconsume ints per row, least significant word first

        Raises:
            AssertionError: If a cell value doesn't fit in its bit range
        """
        int_values = []
        if self.fields:
            nconsume = self._calc_nconsume()
            # All columns have the same length; use the first to count rows
            nrows = len(table[list(self.fields)[0]])
            for row in range(nrows):
                int_value = 0
                for name, (bits_hi, bits_lo) in self.fields.items():
                    max_value = 2 ** (bits_hi - bits_lo + 1)
                    field_value = int(table[name][row])
                    assert field_value < max_value, \
                        "Expected %s[%d] < %s, got %s" % (
                            name, row, max_value, field_value)
                    int_value |= field_value << bits_lo
                # Split the big int into 32-bit numbers
                for _ in range(nconsume):
                    int_values.append(int_value & (2 ** 32 - 1))
                    int_value = int_value >> 32
        return int_values

    def table_from_list(self, int_values):
        """Unpack a flat list of 32-bit ints into a Table.

        Inverse of list_from_table; trailing ints that don't make up a whole
        row are ignored.

        Args:
            int_values (list): Packed words, nconsume per row

        Returns:
            Table: Table with one entry per column in self.fields
        """
        table = Table(self.meta)
        if self.fields:
            nconsume = self._calc_nconsume()
            for i in range(len(int_values) // nconsume):
                # Reassemble the row's big int, least significant word first
                int_value = 0
                for c in range(nconsume):
                    int_value += int(int_values[i * nconsume + c]) << (32 * c)
                row = []
                for name, (bits_hi, bits_lo) in self.fields.items():
                    mask = 2 ** (bits_hi + 1) - 1
                    field_value = (int_value & mask) >> bits_lo
                    row.append(field_value)
                table.append(row)
        return table